From f4f2771277a04fcde57a1eb96c61516fbdb1cb17 Mon Sep 17 00:00:00 2001 From: barbacbd Date: Fri, 30 May 2025 16:04:35 -0400 Subject: [PATCH 1/2] CORS-4060: Migrate AWS sdk to v2 in infrastructure ** Vendor updates --- go.mod | 34 +- go.sum | 68 +- .../aws/aws-sdk-go-v2/aws/credential_cache.go | 11 + .../aws/aws-sdk-go-v2/aws/credentials.go | 57 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/user_agent.go | 112 +- .../aws/protocol/eventstream/CHANGELOG.md | 32 + .../eventstream/go_module_metadata.go | 2 +- .../aws/protocol/eventstream/message.go | 22 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 244 ++ .../aws/aws-sdk-go-v2/config/config.go | 11 +- .../aws/aws-sdk-go-v2/config/env_config.go | 245 +- .../config/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/load_options.go | 97 +- .../aws/aws-sdk-go-v2/config/provider.go | 51 + .../aws/aws-sdk-go-v2/config/resolve.go | 46 + .../config/resolve_credentials.go | 111 +- .../aws/aws-sdk-go-v2/config/shared_config.go | 96 + .../aws-sdk-go-v2/credentials/CHANGELOG.md | 229 ++ .../credentials/ec2rolecreds/provider.go | 12 + .../endpointcreds/internal/client/client.go | 1 + .../credentials/endpointcreds/provider.go | 15 + .../credentials/go_module_metadata.go | 2 +- .../credentials/processcreds/provider.go | 15 + .../credentials/ssocreds/sso_cached_token.go | 2 +- .../ssocreds/sso_credentials_provider.go | 13 + .../credentials/static_provider.go | 10 + .../stscreds/assume_role_provider.go | 18 + .../stscreds/web_identity_provider.go | 31 + .../feature/ec2/imds/CHANGELOG.md | 120 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 13 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.json | 12 +- .../internal/endpoints/v2/CHANGELOG.md | 13 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 12 + .../internal/ini/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 121 + .../internal/v4a/go_module_metadata.go | 2 +- .../internal/v4a/internal/v4/headers.go | 7 +- .../aws/aws-sdk-go-v2/internal/v4a/smithy.go | 8 +- .../internal/accept-encoding/CHANGELOG.md | 28 + .../accept-encoding/go_module_metadata.go | 2 +- .../service/internal/checksum/CHANGELOG.md | 147 + .../service/internal/checksum/algorithms.go | 12 + .../internal/checksum/go_module_metadata.go | 2 +- .../internal/checksum/middleware_add.go | 67 +- .../middleware_checksum_metrics_tracking.go | 90 + .../middleware_compute_input_checksum.go | 152 +- .../checksum/middleware_setup_context.go | 54 +- .../checksum/middleware_validate_output.go | 16 +- .../internal/presigned-url/CHANGELOG.md | 121 + .../presigned-url/go_module_metadata.go | 2 +- .../service/internal/s3shared/CHANGELOG.md | 121 + .../internal/s3shared/go_module_metadata.go | 2 +- .../internal/s3shared/metadata_retriever.go | 5 + .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 310 ++ .../aws-sdk-go-v2/service/s3/api_client.go | 499 ++- .../service/s3/api_op_AbortMultipartUpload.go | 191 +- .../s3/api_op_CompleteMultipartUpload.go | 477 ++- .../service/s3/api_op_CopyObject.go | 793 +++-- .../service/s3/api_op_CreateBucket.go | 255 +- ..._CreateBucketMetadataTableConfiguration.go | 293 ++ .../s3/api_op_CreateMultipartUpload.go | 822 +++-- .../service/s3/api_op_CreateSession.go | 283 +- .../service/s3/api_op_DeleteBucket.go | 91 +- ...i_op_DeleteBucketAnalyticsConfiguration.go | 64 +- .../service/s3/api_op_DeleteBucketCors.go | 55 +- 
.../s3/api_op_DeleteBucketEncryption.go | 97 +- ...teBucketIntelligentTieringConfiguration.go | 73 +- ...i_op_DeleteBucketInventoryConfiguration.go | 66 +- .../s3/api_op_DeleteBucketLifecycle.go | 100 +- ..._DeleteBucketMetadataTableConfiguration.go | 237 ++ ...api_op_DeleteBucketMetricsConfiguration.go | 73 +- .../api_op_DeleteBucketOwnershipControls.go | 52 +- .../service/s3/api_op_DeleteBucketPolicy.go | 130 +- .../s3/api_op_DeleteBucketReplication.go | 68 +- .../service/s3/api_op_DeleteBucketTagging.go | 49 +- .../service/s3/api_op_DeleteBucketWebsite.go | 67 +- .../service/s3/api_op_DeleteObject.go | 238 +- .../service/s3/api_op_DeleteObjectTagging.go | 92 +- .../service/s3/api_op_DeleteObjects.go | 263 +- .../s3/api_op_DeletePublicAccessBlock.go | 61 +- ...api_op_GetBucketAccelerateConfiguration.go | 95 +- .../service/s3/api_op_GetBucketAcl.go | 91 +- .../api_op_GetBucketAnalyticsConfiguration.go | 70 +- .../service/s3/api_op_GetBucketCors.go | 91 +- .../service/s3/api_op_GetBucketEncryption.go | 98 +- ...etBucketIntelligentTieringConfiguration.go | 73 +- .../api_op_GetBucketInventoryConfiguration.go | 62 +- .../api_op_GetBucketLifecycleConfiguration.go | 142 +- .../service/s3/api_op_GetBucketLocation.go | 101 +- .../service/s3/api_op_GetBucketLogging.go | 52 +- ..._op_GetBucketMetadataTableConfiguration.go | 242 ++ .../api_op_GetBucketMetricsConfiguration.go | 71 +- ...i_op_GetBucketNotificationConfiguration.go | 99 +- .../s3/api_op_GetBucketOwnershipControls.go | 52 +- .../service/s3/api_op_GetBucketPolicy.go | 158 +- .../s3/api_op_GetBucketPolicyStatus.go | 65 +- .../service/s3/api_op_GetBucketReplication.go | 72 +- .../s3/api_op_GetBucketRequestPayment.go | 44 +- .../service/s3/api_op_GetBucketTagging.go | 53 +- .../service/s3/api_op_GetBucketVersioning.go | 58 +- .../service/s3/api_op_GetBucketWebsite.go | 55 +- .../service/s3/api_op_GetObject.go | 634 ++-- .../service/s3/api_op_GetObjectAcl.go | 117 +- .../service/s3/api_op_GetObjectAttributes.go | 360 ++- .../service/s3/api_op_GetObjectLegalHold.go | 70 +- .../s3/api_op_GetObjectLockConfiguration.go | 64 +- .../service/s3/api_op_GetObjectRetention.go | 70 +- .../service/s3/api_op_GetObjectTagging.go | 104 +- .../service/s3/api_op_GetObjectTorrent.go | 71 +- .../service/s3/api_op_GetPublicAccessBlock.go | 72 +- .../service/s3/api_op_HeadBucket.go | 264 +- .../service/s3/api_op_HeadObject.go | 680 ++-- ...pi_op_ListBucketAnalyticsConfigurations.go | 77 +- ...tBucketIntelligentTieringConfigurations.go | 73 +- ...pi_op_ListBucketInventoryConfigurations.go | 76 +- .../api_op_ListBucketMetricsConfigurations.go | 86 +- .../service/s3/api_op_ListBuckets.go | 192 +- .../service/s3/api_op_ListDirectoryBuckets.go | 94 +- .../service/s3/api_op_ListMultipartUploads.go | 318 +- .../service/s3/api_op_ListObjectVersions.go | 116 +- .../service/s3/api_op_ListObjects.go | 206 +- .../service/s3/api_op_ListObjectsV2.go | 310 +- .../service/s3/api_op_ListParts.go | 310 +- ...api_op_PutBucketAccelerateConfiguration.go | 102 +- .../service/s3/api_op_PutBucketAcl.go | 263 +- .../api_op_PutBucketAnalyticsConfiguration.go | 94 +- .../service/s3/api_op_PutBucketCors.go | 116 +- .../service/s3/api_op_PutBucketEncryption.go | 222 +- ...utBucketIntelligentTieringConfiguration.go | 97 +- .../api_op_PutBucketInventoryConfiguration.go | 126 +- .../api_op_PutBucketLifecycleConfiguration.go | 217 +- .../service/s3/api_op_PutBucketLogging.go | 149 +- .../api_op_PutBucketMetricsConfiguration.go | 77 +- ...i_op_PutBucketNotificationConfiguration.go | 112 +- 
.../s3/api_op_PutBucketOwnershipControls.go | 90 +- .../service/s3/api_op_PutBucketPolicy.go | 207 +- .../service/s3/api_op_PutBucketReplication.go | 158 +- .../s3/api_op_PutBucketRequestPayment.go | 86 +- .../service/s3/api_op_PutBucketTagging.go | 125 +- .../service/s3/api_op_PutBucketVersioning.go | 132 +- .../service/s3/api_op_PutBucketWebsite.go | 114 +- .../service/s3/api_op_PutObject.go | 794 +++-- .../service/s3/api_op_PutObjectAcl.go | 333 +- .../service/s3/api_op_PutObjectLegalHold.go | 96 +- .../s3/api_op_PutObjectLockConfiguration.go | 88 +- .../service/s3/api_op_PutObjectRetention.go | 104 +- .../service/s3/api_op_PutObjectTagging.go | 146 +- .../service/s3/api_op_PutPublicAccessBlock.go | 103 +- .../service/s3/api_op_RestoreObject.go | 251 +- .../service/s3/api_op_SelectObjectContent.go | 170 +- .../service/s3/api_op_UploadPart.go | 477 ++- .../service/s3/api_op_UploadPartCopy.go | 445 ++- .../s3/api_op_WriteGetObjectResponse.go | 240 +- .../aws/aws-sdk-go-v2/service/s3/auth.go | 49 +- .../aws-sdk-go-v2/service/s3/deserializers.go | 1673 +++++++++- .../aws/aws-sdk-go-v2/service/s3/endpoints.go | 2743 ++++++++++++++--- .../service/s3/express_user_agent.go | 43 + .../aws-sdk-go-v2/service/s3/generated.json | 6 +- .../service/s3/go_module_metadata.go | 2 +- .../s3/internal/endpoints/endpoints.go | 97 +- .../aws/aws-sdk-go-v2/service/s3/options.go | 49 +- .../aws-sdk-go-v2/service/s3/presign_post.go | 419 +++ .../aws-sdk-go-v2/service/s3/serializers.go | 1717 +++++++++-- .../aws-sdk-go-v2/service/s3/types/enums.go | 355 ++- .../aws-sdk-go-v2/service/s3/types/errors.go | 140 +- .../aws-sdk-go-v2/service/s3/types/types.go | 2144 +++++++++---- .../aws-sdk-go-v2/service/s3/uri_context.go | 23 + .../aws-sdk-go-v2/service/s3/validators.go | 197 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 185 ++ .../aws-sdk-go-v2/service/sso/api_client.go | 416 ++- .../service/sso/api_op_GetRoleCredentials.go | 31 +- .../service/sso/api_op_ListAccountRoles.go | 50 +- .../service/sso/api_op_ListAccounts.go | 55 +- .../service/sso/api_op_Logout.go | 55 +- .../aws/aws-sdk-go-v2/service/sso/auth.go | 43 +- .../service/sso/deserializers.go | 39 +- .../aws/aws-sdk-go-v2/service/sso/doc.go | 22 +- .../aws-sdk-go-v2/service/sso/endpoints.go | 27 +- .../aws-sdk-go-v2/service/sso/generated.json | 3 +- .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 41 +- .../aws/aws-sdk-go-v2/service/sso/options.go | 39 +- .../aws-sdk-go-v2/service/sso/serializers.go | 33 +- .../aws-sdk-go-v2/service/sso/types/types.go | 20 +- .../service/ssooidc/CHANGELOG.md | 190 ++ .../service/ssooidc/api_client.go | 416 ++- .../service/ssooidc/api_op_CreateToken.go | 93 +- .../ssooidc/api_op_CreateTokenWithIAM.go | 97 +- .../service/ssooidc/api_op_RegisterClient.go | 57 +- .../api_op_StartDeviceAuthorization.go | 35 +- .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 43 +- .../service/ssooidc/deserializers.go | 167 + .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 43 +- .../service/ssooidc/endpoints.go | 27 +- .../service/ssooidc/generated.json | 3 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 41 +- .../aws-sdk-go-v2/service/ssooidc/options.go | 39 +- .../service/ssooidc/serializers.go | 81 + .../service/ssooidc/types/errors.go | 32 +- .../service/ssooidc/types/types.go | 13 + .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 200 ++ .../aws-sdk-go-v2/service/sts/api_client.go | 416 ++- .../service/sts/api_op_AssumeRole.go | 487 +-- 
.../service/sts/api_op_AssumeRoleWithSAML.go | 395 ++- .../sts/api_op_AssumeRoleWithWebIdentity.go | 426 ++- .../service/sts/api_op_AssumeRoot.go | 223 ++ .../sts/api_op_DecodeAuthorizationMessage.go | 69 +- .../service/sts/api_op_GetAccessKeyInfo.go | 73 +- .../service/sts/api_op_GetCallerIdentity.go | 50 +- .../service/sts/api_op_GetFederationToken.go | 341 +- .../service/sts/api_op_GetSessionToken.go | 128 +- .../aws/aws-sdk-go-v2/service/sts/auth.go | 43 +- .../service/sts/deserializers.go | 212 ++ .../aws/aws-sdk-go-v2/service/sts/doc.go | 12 +- .../aws-sdk-go-v2/service/sts/endpoints.go | 27 +- .../aws-sdk-go-v2/service/sts/generated.json | 4 +- .../service/sts/go_module_metadata.go | 2 +- .../sts/internal/endpoints/endpoints.go | 47 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 39 +- .../aws-sdk-go-v2/service/sts/serializers.go | 143 + .../aws-sdk-go-v2/service/sts/types/errors.go | 30 +- .../aws-sdk-go-v2/service/sts/types/types.go | 50 +- .../aws-sdk-go-v2/service/sts/validators.go | 42 + vendor/modules.txt | 68 +- 229 files changed, 28560 insertions(+), 7533 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go diff --git a/go.mod b/go.mod index a0c19aeb9f..2ec616c9ef 100644 --- a/go.mod +++ b/go.mod @@ -38,15 +38,15 @@ require ( github.com/apparentlymart/go-cidr v1.1.0 github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab github.com/aws/aws-sdk-go v1.55.5 - github.com/aws/aws-sdk-go-v2 v1.36.0 - github.com/aws/aws-sdk-go-v2/config v1.27.11 + github.com/aws/aws-sdk-go-v2 v1.36.3 + github.com/aws/aws-sdk-go-v2/config v1.29.14 github.com/aws/aws-sdk-go-v2/service/ec2 v1.159.0 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.11 github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.25.17 github.com/aws/aws-sdk-go-v2/service/route53 v1.48.6 - github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 - github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 + github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0 + github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 github.com/aws/smithy-go v1.22.3 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e github.com/clarketm/json v1.14.1 @@ -172,19 +172,19 @@ require ( github.com/PaesslerAG/jsonpath v0.1.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/asaskevich/govalidator/v11 v11.0.2-0.20250122183457-e11347878e23 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/blang/semver/v4 v4.0.0 // indirect diff --git a/go.sum b/go.sum index 97322a8d3d..d3a975818c 100644 --- a/go.sum +++ b/go.sum @@ -159,50 +159,50 @@ github.com/awalterschulze/gographviz v0.0.0-20190522210029-fa59802746ab/go.mod h github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= -github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= -github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= 
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= +github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= +github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= +github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= github.com/aws/aws-sdk-go-v2/service/ec2 v1.159.0 h1:DmmVmiLPlcntOcjWMRwDPMNx/wi2kAVrf2ZmSN5gkAg= github.com/aws/aws-sdk-go-v2/service/ec2 v1.159.0/go.mod h1:xejKuuRDjz6z5OqyeLsz01MlOqqW7CqpAB4PabNvpu8= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.11 h1:lhPF/4q7oWvPaP5KxRJ3vfVQlwVFtzcbxhzOWkEdtf4= github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.43.11/go.mod h1:62zWGpej9djfF0vE7X9MVAA6MgzeFLJjhFYVT2ymjOM= github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU= github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= 
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 h1:BCG7DCXEXpNCcpwCxg1oi9pkJWH2+eZzTn9MY56MbVw= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA= github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.25.17 h1:6qN0V27mvu9XPhIwpWKjz8lmBrCYoVx4i2lnh+pYnBs= github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.25.17/go.mod h1:Ie+ps/S7x/54j5/XXbfwXCCasyv67QQX8SAeop2UMbI= github.com/aws/aws-sdk-go-v2/service/route53 v1.48.6 h1:O7L9iEodiF07vJoXShMrw2XyeAqZhLUIXqWEitCq6EE= github.com/aws/aws-sdk-go-v2/service/route53 v1.48.6/go.mod h1:E93uWfli9RToQzVA7+bYnynKOFcYOhNWqhY1hWSMZRc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0 h1:fV4XIU5sn/x8gjRouoJpDVHj+ExJaUk4prYF+eb6qTs= +github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0/go.mod h1:qbn305Je/IofWBJ4bJz/Q7pDEtnnoInw/dGt71v6rHE= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.22.3 
h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go index 781ac0ae2c..623890e8d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -172,6 +172,17 @@ func (p *CredentialsCache) getCreds() (Credentials, bool) { return *c, true } +// ProviderSources returns a list of where the underlying credential provider +// has been sourced, if available. Returns empty if the provider doesn't implement +// the interface +func (p *CredentialsCache) ProviderSources() []CredentialSource { + asSource, ok := p.provider.(CredentialProviderSource) + if !ok { + return []CredentialSource{} + } + return asSource.ProviderSources() +} + // Invalidate will invalidate the cached credentials. The next call to Retrieve // will cause the provider's Retrieve method to be called. func (p *CredentialsCache) Invalidate() { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 98ba770564..4ad2ee4405 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -70,6 +70,56 @@ func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) { fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with") } +// CredentialSource is the source of the credential provider. +// A provider can have multiple credential sources: For example, a provider that reads a profile, calls ECS to +// get credentials and then assumes a role using STS will have all these as part of its provider chain. 
+type CredentialSource int + +const ( + // CredentialSourceUndefined is the sentinel zero value + CredentialSourceUndefined CredentialSource = iota + // CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance + CredentialSourceCode + // CredentialSourceEnvVars credentials resolved from environment variables + CredentialSourceEnvVars + // CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token + CredentialSourceEnvVarsSTSWebIDToken + // CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole + CredentialSourceSTSAssumeRole + // CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML + CredentialSourceSTSAssumeRoleSaml + // CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity + CredentialSourceSTSAssumeRoleWebID + // CredentialSourceSTSFederationToken credentials resolved from STS using a federation token + CredentialSourceSTSFederationToken + // CredentialSourceSTSSessionToken credentials resolved from STS using a session token S + CredentialSourceSTSSessionToken + // CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials + CredentialSourceProfile + // CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile + CredentialSourceProfileSourceProfile + // CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer) + CredentialSourceProfileNamedProvider + // CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile + CredentialSourceProfileSTSWebIDToken + // CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile + CredentialSourceProfileSSO + // CredentialSourceSSO credentials resolved from an SSO session + CredentialSourceSSO + // CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format + CredentialSourceProfileSSOLegacy + // CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format + CredentialSourceSSOLegacy + // CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile + CredentialSourceProfileProcess + // CredentialSourceProcess credentials resolved from a process + CredentialSourceProcess + // CredentialSourceHTTP credentials resolved from an HTTP endpoint + CredentialSourceHTTP + // CredentialSourceIMDS credentials resolved from the instance metadata service (IMDS) + CredentialSourceIMDS +) + // A Credentials is the AWS credentials value for individual credential fields. type Credentials struct { // AWS Access key ID @@ -125,6 +175,13 @@ type CredentialsProvider interface { Retrieve(ctx context.Context) (Credentials, error) } +// CredentialProviderSource allows any credential provider to track +// all providers where a credential provider were sourced. For example, if the credentials came from a +// call to a role specified in the profile, this method will give the whole breadcrumb trail +type CredentialProviderSource interface { + ProviderSources() []CredentialSource +} + // CredentialsProviderFunc provides a helper wrapping a function value to // satisfy the CredentialsProvider interface. 
type CredentialsProviderFunc func(context.Context) (Credentials, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 512d8d8bae..8e930fc6f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.36.0" +const goModuleVersion = "1.36.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 01d758d5ff..6ee3391be2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -76,30 +76,90 @@ type UserAgentFeature string // Enumerates UserAgentFeature. const ( - UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) - UserAgentFeatureWaiter = "B" - UserAgentFeaturePaginator = "C" - UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) - UserAgentFeatureRetryModeStandard = "E" - UserAgentFeatureRetryModeAdaptive = "F" - UserAgentFeatureS3Transfer = "G" - UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) - UserAgentFeatureS3CryptoV2 = "I" // n/a - UserAgentFeatureS3ExpressBucket = "J" - UserAgentFeatureS3AccessGrants = "K" // not yet implemented - UserAgentFeatureGZIPRequestCompression = "L" - UserAgentFeatureProtocolRPCV2CBOR = "M" - UserAgentFeatureRequestChecksumCRC32 = "U" - UserAgentFeatureRequestChecksumCRC32C = "V" - UserAgentFeatureRequestChecksumCRC64 = "W" - UserAgentFeatureRequestChecksumSHA1 = "X" - UserAgentFeatureRequestChecksumSHA256 = "Y" - UserAgentFeatureRequestChecksumWhenSupported = "Z" - UserAgentFeatureRequestChecksumWhenRequired = "a" - UserAgentFeatureResponseChecksumWhenSupported = "b" - UserAgentFeatureResponseChecksumWhenRequired = "c" + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + + UserAgentFeatureGZIPRequestCompression = "L" + + UserAgentFeatureProtocolRPCV2CBOR = "M" + + UserAgentFeatureAccountIDEndpoint = "O" // DO NOT IMPLEMENT: rules output is not currently defined. SDKs should not parse endpoints for feature information. 
+ UserAgentFeatureAccountIDModePreferred = "P" + UserAgentFeatureAccountIDModeDisabled = "Q" + UserAgentFeatureAccountIDModeRequired = "R" + + UserAgentFeatureRequestChecksumCRC32 = "U" + UserAgentFeatureRequestChecksumCRC32C = "V" + UserAgentFeatureRequestChecksumCRC64 = "W" + UserAgentFeatureRequestChecksumSHA1 = "X" + UserAgentFeatureRequestChecksumSHA256 = "Y" + UserAgentFeatureRequestChecksumWhenSupported = "Z" + UserAgentFeatureRequestChecksumWhenRequired = "a" + UserAgentFeatureResponseChecksumWhenSupported = "b" + UserAgentFeatureResponseChecksumWhenRequired = "c" + + UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented + + UserAgentFeatureCredentialsCode = "e" + UserAgentFeatureCredentialsJvmSystemProperties = "f" // n/a (this is not a JVM sdk) + UserAgentFeatureCredentialsEnvVars = "g" + UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h" + UserAgentFeatureCredentialsStsAssumeRole = "i" + UserAgentFeatureCredentialsStsAssumeRoleSaml = "j" // not yet implemented + UserAgentFeatureCredentialsStsAssumeRoleWebID = "k" + UserAgentFeatureCredentialsStsFederationToken = "l" // not yet implemented + UserAgentFeatureCredentialsStsSessionToken = "m" // not yet implemented + UserAgentFeatureCredentialsProfile = "n" + UserAgentFeatureCredentialsProfileSourceProfile = "o" + UserAgentFeatureCredentialsProfileNamedProvider = "p" + UserAgentFeatureCredentialsProfileStsWebIDToken = "q" + UserAgentFeatureCredentialsProfileSso = "r" + UserAgentFeatureCredentialsSso = "s" + UserAgentFeatureCredentialsProfileSsoLegacy = "t" + UserAgentFeatureCredentialsSsoLegacy = "u" + UserAgentFeatureCredentialsProfileProcess = "v" + UserAgentFeatureCredentialsProcess = "w" + UserAgentFeatureCredentialsBoto2ConfigFile = "x" // n/a (this is not boto/Python) + UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk) + UserAgentFeatureCredentialsHTTP = "z" + UserAgentFeatureCredentialsIMDS = "0" ) +var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{ + aws.CredentialSourceCode: UserAgentFeatureCredentialsCode, + aws.CredentialSourceEnvVars: UserAgentFeatureCredentialsEnvVars, + aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken, + aws.CredentialSourceSTSAssumeRole: UserAgentFeatureCredentialsStsAssumeRole, + aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml, + aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID, + aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken, + aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken, + aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile, + aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile, + aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider, + aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken, + aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso, + aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso, + aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy, + aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy, + aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess, + aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess, + aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP, + aws.CredentialSourceIMDS: 
UserAgentFeatureCredentialsIMDS, +} + // RequestUserAgent is a build middleware that set the User-Agent for the request. type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder @@ -252,6 +312,14 @@ func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, val u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) } +// AddCredentialsSource adds the credential source as a feature on the User-Agent string +func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) { + x, ok := credentialSourceToFeature[source] + if ok { + u.AddUserAgentFeature(x) + } +} + // ID the name of the middleware. func (u *RequestUserAgent) ID() string { return "UserAgent" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 186c488924..ddb162b367 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,35 @@ +# v1.6.10 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.6.9 (2025-02-14) + +* **Bug Fix**: Remove max limit on event stream messages + +# v1.6.8 (2025-01-24) + +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.6.7 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. + +# v1.6.6 (2024-10-04) + +* No change notes available for this release. + +# v1.6.5 (2024-09-20) + +* No change notes available for this release. + +# v1.6.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.6.3 (2024-06-28) + +* No change notes available for this release. + # v1.6.2 (2024-03-29) * No change notes available for this release. 
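The credential_cache.go, credentials.go, and user_agent.go hunks above add credential-source tracking: a provider that implements the new aws.CredentialProviderSource interface has its resolution chain surfaced through CredentialsCache.ProviderSources() and reported via the User-Agent feature mapping. A minimal consumer-side sketch follows; it is illustrative only and not part of this patch, and envFileProvider with its placeholder credentials is hypothetical, used just to show the shape of the API.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// envFileProvider is a hypothetical provider used purely for illustration.
type envFileProvider struct{}

// Retrieve satisfies aws.CredentialsProvider.
func (envFileProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
	return aws.Credentials{
		AccessKeyID:     "EXAMPLEKEY",
		SecretAccessKey: "EXAMPLESECRET",
		Source:          "envFileProvider",
	}, nil
}

// ProviderSources satisfies the new aws.CredentialProviderSource interface so
// callers (and the SDK's User-Agent middleware) can see where credentials came from.
func (envFileProvider) ProviderSources() []aws.CredentialSource {
	return []aws.CredentialSource{aws.CredentialSourceEnvVars}
}

func main() {
	cache := aws.NewCredentialsCache(envFileProvider{})
	// CredentialsCache.ProviderSources delegates to the wrapped provider when it
	// implements CredentialProviderSource; otherwise it returns an empty slice.
	fmt.Println(cache.ProviderSources())
}
```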
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index a0479b9be2..01981f4648 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.6.2" +const goModuleVersion = "1.6.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go index f7427da039..1a77654f7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go @@ -10,9 +10,6 @@ const preludeLen = 8 const preludeCRCLen = 4 const msgCRCLen = 4 const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen -const maxPayloadLen = 1024 * 1024 * 16 // 16MB -const maxHeadersLen = 1024 * 128 // 128KB -const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen var crc32IEEETable = crc32.MakeTable(crc32.IEEE) @@ -82,28 +79,13 @@ func (p messagePrelude) PayloadLen() uint32 { } func (p messagePrelude) ValidateLens() error { - if p.Length == 0 || p.Length > maxMsgLen { + if p.Length == 0 { return LengthError{ Part: "message prelude", - Want: maxMsgLen, + Want: minMsgLen, Have: int(p.Length), } } - if p.HeadersLen > maxHeadersLen { - return LengthError{ - Part: "message headers", - Want: maxHeadersLen, - Have: int(p.HeadersLen), - } - } - if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { - return LengthError{ - Part: "message payload", - Want: maxPayloadLen, - Have: int(payloadLen), - } - } - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 87ea591aef..e7174e02b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,247 @@ +# v1.29.14 (2025-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.13 (2025-04-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.12 (2025-03-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.11 (2025-03-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.10 (2025-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.9 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.8 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.7 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.6 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.5 (2025-02-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.4 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.3 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.2 (2025-01-24) + +* **Bug Fix**: Fix env config naming and usage of deprecated ioutil +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency 
Update**: Upgrade to smithy-go v1.22.2. + +# v1.29.1 (2025-01-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2025-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2025-01-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.9 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2025-01-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-10-16) + +* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code. + +# v1.27.43 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.42 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.41 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.40 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.39 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.38 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.37 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.36 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.35 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.34 (2024-09-16) + +* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. 
Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it + +# v1.27.33 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.32 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.31 (2024-08-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.30 (2024-08-23) + +* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file + +# v1.27.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.17 (2024-06-03) + +* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.27.11 (2024-04-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 50582d89d5..09d9b63116 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -80,6 +80,15 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile resolveRequestMinCompressSizeBytes, + + // Sets the AccountIDEndpointMode if present in env var or shared config profile + resolveAccountIDEndpointMode, + + // Sets the RequestChecksumCalculation if present in env var or shared config profile + resolveRequestChecksumCalculation, + + // Sets the ResponseChecksumValidation if present in env var or shared config profile + resolveResponseChecksumValidation, } // A Config represents a generic configuration value or set of values. 
This type @@ -209,7 +218,7 @@ func resolveConfigLoaders(options *LoadOptions) []loader { loaders[0] = loadEnvConfig // specification of a profile should cause a load failure if it doesn't exist - if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" { + if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" { loaders[1] = loadSharedConfig } else { loaders[1] = loadSharedConfigIgnoreNotExist diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 88550198cc..9db507e38e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "strconv" "strings" @@ -21,83 +20,89 @@ const CredentialsSourceName = "EnvConfigCredentials" // Environment variables that will be read for configuration values. const ( - awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID" - awsAccessKeyEnvVar = "AWS_ACCESS_KEY" + awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID" + awsAccessKeyEnv = "AWS_ACCESS_KEY" - awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY" - awsSecretKeyEnvVar = "AWS_SECRET_KEY" + awsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY" + awsSecretKeyEnv = "AWS_SECRET_KEY" - awsSessionTokenEnvVar = "AWS_SESSION_TOKEN" + awsSessionTokenEnv = "AWS_SESSION_TOKEN" - awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" - awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" - awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + awsContainerCredentialsFullURIEnv = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + awsContainerCredentialsRelativeURIEnv = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" + awsContainerAuthorizationTokenEnv = "AWS_CONTAINER_AUTHORIZATION_TOKEN" - awsRegionEnvVar = "AWS_REGION" - awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION" + awsRegionEnv = "AWS_REGION" + awsDefaultRegionEnv = "AWS_DEFAULT_REGION" - awsProfileEnvVar = "AWS_PROFILE" - awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE" + awsProfileEnv = "AWS_PROFILE" + awsDefaultProfileEnv = "AWS_DEFAULT_PROFILE" - awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" + awsSharedCredentialsFileEnv = "AWS_SHARED_CREDENTIALS_FILE" - awsConfigFileEnvVar = "AWS_CONFIG_FILE" + awsConfigFileEnv = "AWS_CONFIG_FILE" - awsCustomCABundleEnvVar = "AWS_CA_BUNDLE" + awsCABundleEnv = "AWS_CA_BUNDLE" - awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE" + awsWebIdentityTokenFileEnv = "AWS_WEB_IDENTITY_TOKEN_FILE" - awsRoleARNEnvVar = "AWS_ROLE_ARN" - awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME" + awsRoleARNEnv = "AWS_ROLE_ARN" + awsRoleSessionNameEnv = "AWS_ROLE_SESSION_NAME" - awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY" + awsEnableEndpointDiscoveryEnv = "AWS_ENABLE_ENDPOINT_DISCOVERY" - awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION" + awsS3UseARNRegionEnv = "AWS_S3_USE_ARN_REGION" - awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" + awsEc2MetadataServiceEndpointModeEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE" - awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + awsEc2MetadataServiceEndpointEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" - awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED" + awsEc2MetadataDisabledEnv = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataV1DisabledEnv = 
"AWS_EC2_METADATA_V1_DISABLED" - awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" + awsS3DisableMultiRegionAccessPointsEnv = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" - awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT" + awsUseDualStackEndpointEnv = "AWS_USE_DUALSTACK_ENDPOINT" - awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT" + awsUseFIPSEndpointEnv = "AWS_USE_FIPS_ENDPOINT" - awsDefaultMode = "AWS_DEFAULTS_MODE" + awsDefaultsModeEnv = "AWS_DEFAULTS_MODE" - awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" - awsRetryMode = "AWS_RETRY_MODE" - awsSdkAppID = "AWS_SDK_UA_APP_ID" + awsMaxAttemptsEnv = "AWS_MAX_ATTEMPTS" + awsRetryModeEnv = "AWS_RETRY_MODE" + awsSdkUaAppIDEnv = "AWS_SDK_UA_APP_ID" - awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" - awsEndpointURL = "AWS_ENDPOINT_URL" + awsIgnoreConfiguredEndpointURLEnv = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" + awsEndpointURLEnv = "AWS_ENDPOINT_URL" - awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION" - awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" + awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION" + awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" + + awsAccountIDEnv = "AWS_ACCOUNT_ID" + awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" + + awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION" + awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION" ) var ( credAccessEnvKeys = []string{ - awsAccessKeyIDEnvVar, - awsAccessKeyEnvVar, + awsAccessKeyIDEnv, + awsAccessKeyEnv, } credSecretEnvKeys = []string{ - awsSecretAccessKeyEnvVar, - awsSecretKeyEnvVar, + awsSecretAccessKeyEnv, + awsSecretKeyEnv, } regionEnvKeys = []string{ - awsRegionEnvVar, - awsDefaultRegionEnvVar, + awsRegionEnv, + awsDefaultRegionEnv, } profileEnvKeys = []string{ - awsProfileEnvVar, - awsDefaultProfileEnvVar, + awsProfileEnv, + awsDefaultProfileEnv, } ) @@ -290,6 +295,15 @@ type EnvConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + // Indicates whether account ID will be required/ignored in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + + // Indicates whether request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Indicates whether response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation } // loadEnvConfig reads configuration values from the OS's environment variables. 
@@ -309,79 +323,80 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) if creds.HasKeys() { - creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) + creds.AccountID = os.Getenv(awsAccountIDEnv) + creds.SessionToken = os.Getenv(awsSessionTokenEnv) cfg.Credentials = creds } - cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar) - cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar) - cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar) + cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv) + cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv) + cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv) setStringFromEnvVal(&cfg.Region, regionEnvKeys) setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) - cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar) - cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar) + cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv) + cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv) - cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar) + cfg.CustomCABundle = os.Getenv(awsCABundleEnv) - cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar) + cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFileEnv) - cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) - cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) + cfg.RoleARN = os.Getenv(awsRoleARNEnv) + cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnv) - cfg.AppID = os.Getenv(awsSdkAppID) + cfg.AppID = os.Getenv(awsSdkUaAppIDEnv) - if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil { + if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompressionEnv}); err != nil { return cfg, err } - if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { + if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytesEnv}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil { return cfg, err } - if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnv}); err != nil { return cfg, err } - if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil { + if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnv}); err != nil { return cfg, err } - setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled}) - if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil { + setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabledEnv}) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnv}); err != nil { return cfg, err } - cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) - if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, 
[]string{awsEc2MetadataV1DisabledEnvVar}); err != nil { + cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnv) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnv}); err != nil { return cfg, err } - if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { + if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointsEnv}); err != nil { return cfg, err } - if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil { + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpointEnv}); err != nil { return cfg, err } - if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil { + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpointEnv}); err != nil { return cfg, err } - if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil { + if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultsModeEnv}); err != nil { return cfg, err } - if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil { + if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsMaxAttemptsEnv}); err != nil { return cfg, err } - if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil { + if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryModeEnv}); err != nil { return cfg, err } - setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv}) - if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil { return cfg, err } @@ -389,6 +404,17 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { + return cfg, err + } + + if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil { + return cfg, err + } + if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil { + return cfg, err + } + return cfg, nil } @@ -417,6 +443,18 @@ func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, return *c.RequestMinCompressSizeBytes, true, nil } +func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + +func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) { + return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil +} + +func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) { + return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. 
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -491,6 +529,67 @@ func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { return nil } +func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch value { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) + } + break + } + return nil +} + +func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch strings.ToLower(value) { + case checksumWhenSupported: + *m = aws.RequestChecksumCalculationWhenSupported + case checksumWhenRequired: + *m = aws.RequestChecksumCalculationWhenRequired + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) + } + } + return nil +} + +func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch strings.ToLower(value) { + case checksumWhenSupported: + *m = aws.ResponseChecksumValidationWhenSupported + case checksumWhenRequired: + *m = aws.ResponseChecksumValidationWhenRequired + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value) + } + + } + return nil +} + // GetRegion returns the AWS Region if set in the environment. Returns an empty // string if not set. func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { @@ -547,7 +646,7 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { return nil, false, nil } - b, err := ioutil.ReadFile(c.CustomCABundle) + b, err := os.ReadFile(c.CustomCABundle) if err != nil { return nil, false, err } @@ -571,7 +670,7 @@ func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { // GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use // with configured endpoints. 
func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { - if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" { + if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" { return endpt, true, nil } return "", false, nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 46566e90b6..8be8c01e06 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.27.11" +const goModuleVersion = "1.29.14" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 06596c1b7c..0810ecf16a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -215,6 +215,19 @@ type LoadOptions struct { // Whether S3 Express auth is disabled. S3DisableExpressAuth *bool + + // Whether account id should be built into endpoint resolution + AccountIDEndpointMode aws.AccountIDEndpointMode + + // Specify if request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Specifies if response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation + + // Service endpoint override. This value is not necessarily final and is + // passed to the service's EndpointResolverV2 for further delegation. + BaseEndpoint string } func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { @@ -278,6 +291,31 @@ func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, return *o.RequestMinCompressSizeBytes, true, nil } +func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil +} + +func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { + return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil +} + +func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { + return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil +} + +func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) { + return o.BaseEndpoint, o.BaseEndpoint != "", nil +} + +// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider. +// +// The sdkID value is unused because LoadOptions only supports setting a GLOBAL +// endpoint override. In-code, per-service endpoint overrides are performed via +// functional options in service client space. +func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) { + return o.BaseEndpoint, o.BaseEndpoint != "", nil +} + // WithRegion is a helper function to construct functional options // that sets Region on config's LoadOptions. Setting the region to // an empty string, will result in the region value being ignored. 
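Editor's note — an illustrative sketch (not part of the vendored patch) of the in-code counterparts to the settings above, using only the functional options this diff adds to config's LoadOptions. The endpoint URL is a hypothetical placeholder.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithAccountIDEndpointMode(aws.AccountIDEndpointModeRequired),
		config.WithRequestChecksumCalculation(aws.RequestChecksumCalculationWhenRequired),
		config.WithResponseChecksumValidation(aws.ResponseChecksumValidationWhenRequired),
		// Global, in-code endpoint override: it takes precedence over
		// AWS_ENDPOINT_URL and shared-config endpoint directives, while
		// per-client functional options remain more specific still.
		config.WithBaseEndpoint("https://example.localstack.internal"),
	)
	if err != nil {
		panic(err)
	}
	_ = cfg
}
```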
@@ -323,6 +361,37 @@ func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOpt } } +// WithAccountIDEndpointMode is a helper function to construct functional options +// that sets AccountIDEndpointMode on config's LoadOptions +func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + if m != "" { + o.AccountIDEndpointMode = m + } + return nil + } +} + +// WithRequestChecksumCalculation is a helper function to construct functional options +// that sets RequestChecksumCalculation on config's LoadOptions +func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc { + return func(o *LoadOptions) error { + if c > 0 { + o.RequestChecksumCalculation = c + } + return nil + } +} + +// WithResponseChecksumValidation is a helper function to construct functional options +// that sets ResponseChecksumValidation on config's LoadOptions +func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ResponseChecksumValidation = v + return nil + } +} + // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { @@ -824,7 +893,14 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. // -// Deprecated: See WithEndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Use of +// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolver = v @@ -844,6 +920,9 @@ func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.En // that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [WithEndpointResolver]. func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolverWithOptions = v @@ -1112,3 +1191,19 @@ func WithS3DisableExpressAuth(v bool) LoadOptionsFunc { return nil } } + +// WithBaseEndpoint is a helper function to construct functional options that +// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and +// subsequent calls to this API override previous ones. +// +// This is an in-code setting, therefore, any value set using this hook takes +// precedence over and will override ALL environment and shared config +// directives that set endpoint URLs. 
Functional options on service clients +// have higher specificity, and functional options that modify the value of +// BaseEndpoint on a client will take precedence over this setting. +func WithBaseEndpoint(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BaseEndpoint = v + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 13745fc98f..a8ff40d846 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -225,6 +225,57 @@ func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value return } +// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode +type accountIDEndpointModeProvider interface { + getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) +} + +func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(accountIDEndpointModeProvider); ok { + value, found, err = p.getAccountIDEndpointMode(ctx) + if err != nil || found { + break + } + } + } + return +} + +// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation +type requestChecksumCalculationProvider interface { + getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) +} + +func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(requestChecksumCalculationProvider); ok { + value, found, err = p.getRequestChecksumCalculation(ctx) + if err != nil || found { + break + } + } + } + return +} + +// responseChecksumValidationProvider provides access to the ResponseChecksumValidation +type responseChecksumValidationProvider interface { + getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) +} + +func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(responseChecksumValidationProvider); ok { + value, found, err = p.getResponseChecksumValidation(ctx) + if err != nil || found { + break + } + } + } + return +} + // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index fde2e3980e..a68bd0993f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -166,6 +166,52 @@ func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, co return nil } +// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's +// SharedConfig or EnvConfig +func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { + m, found, err := getAccountIDEndpointMode(ctx, configs) + if err != nil { + return err + } + + if !found { + m = aws.AccountIDEndpointModePreferred + } + + cfg.AccountIDEndpointMode = m + return nil +} + +// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's +// SharedConfig or EnvConfig +func 
resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getRequestChecksumCalculation(ctx, configs) + if err != nil { + return err + } + + if !found { + c = aws.RequestChecksumCalculationWhenSupported + } + cfg.RequestChecksumCalculation = c + return nil +} + +// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's +// SharedConfig or EnvConfig +func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error { + c, found, err := getResponseChecksumValidation(ctx, configs) + if err != nil { + return err + } + + if !found { + c = aws.ResponseChecksumValidationWhenSupported + } + cfg.ResponseChecksumValidation = c + return nil +} + // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 89368520f3..b00259df03 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -112,13 +112,15 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config switch { case sharedProfileSet: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) case envConfig.Credentials.HasKeys(): - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} case len(envConfig.WebIdentityTokenFilePath) > 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken) err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) default: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) } if err != nil { return err @@ -133,53 +135,71 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config return nil } -func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) { - +func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) { switch { case sharedConfig.Source != nil: + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile) // Assume IAM role with credentials source from a different profile. - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) case sharedConfig.Credentials.HasKeys(): // Static Credentials from Shared Config/Credentials file. 
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfile) cfg.Credentials = credentials.StaticCredentialsProvider{ - Value: sharedConfig.Credentials, + Value: sharedConfig.Credentials, + Source: getCredentialSources(ctx), } case len(sharedConfig.CredentialSource) != 0: - err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider) + ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) case len(sharedConfig.WebIdentityTokenFile) != 0: // Credentials from Assume Web Identity token require an IAM Role, and // that roll will be assumed. May be wrapped with another assume role // via SourceProfile. - return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken) + return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) case sharedConfig.hasSSOConfiguration(): + if sharedConfig.hasLegacySSOConfiguration() { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy) + ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy) + } else { + ctx = addCredentialSource(ctx, aws.CredentialSourceSSO) + } + if sharedConfig.SSOSession != nil { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO) + } err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) case len(sharedConfig.CredentialProcess) != 0: // Get credentials from CredentialProcess + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess) + ctx = addCredentialSource(ctx, aws.CredentialSourceProcess) err = processCredentials(ctx, cfg, sharedConfig, configs) - case len(envConfig.ContainerCredentialsEndpoint) != 0: - err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) - case len(envConfig.ContainerCredentialsRelativePath) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + case len(envConfig.ContainerCredentialsEndpoint) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + default: + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) err = resolveEC2RoleCredentials(ctx, cfg, configs) } if err != nil { - return err + return ctx, err } if len(sharedConfig.RoleARN) > 0 { - return credsFromAssumeRole(ctx, cfg, sharedConfig, configs) + return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs) } - return nil + return ctx, nil } func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { @@ -198,6 +218,10 @@ func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *S cfgCopy := cfg.Copy() + options = append(options, func(o *ssocreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + if sharedConfig.SSOSession != nil { ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) if err != nil { @@ -242,6 +266,10 @@ func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *Shar opts = append(opts, options) } + opts = append(opts, func(o 
*processcreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) return nil @@ -323,6 +351,7 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke if cfg.Retryer != nil { options.Retryer = cfg.Retryer() } + options.CredentialSources = getCredentialSources(ctx) }, } @@ -346,25 +375,31 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke return nil } -func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) { +func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) { switch sharedCfg.CredentialSource { case credSourceEc2Metadata: - return resolveEC2RoleCredentials(ctx, cfg, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) + return ctx, resolveEC2RoleCredentials(ctx, cfg, configs) case credSourceEnvironment: - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} case credSourceECSContainer: - if len(envConfig.ContainerCredentialsRelativePath) == 0 { - return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set") + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + if len(envConfig.ContainerCredentialsRelativePath) != 0 { + return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) } - return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + if len(envConfig.ContainerCredentialsEndpoint) != 0 { + return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + } + return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") default: - return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") + return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") } - return nil + return ctx, nil } func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { @@ -383,6 +418,7 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con if o.Client == nil { o.Client = imds.NewFromConfig(*cfg) } + o.CredentialSources = getCredentialSources(ctx) }) provider := ec2rolecreds.New(optFns...) 
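Editor's note — a sketch (not part of the vendored patch) of the credential-source reporting surface introduced in this change. The providers touched here gain a ProviderSources method, and the diff asserts that they satisfy aws.CredentialProviderSource; the type assertion below assumes that interface exposes exactly that method.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	// A static provider built directly in code, with no Source chain recorded.
	var provider aws.CredentialsProvider = credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")

	// Per the diff, an unset Source reports aws.CredentialSourceCode,
	// i.e. hard-coded credentials.
	if src, ok := provider.(aws.CredentialProviderSource); ok {
		fmt.Println(src.ProviderSources())
	}
}
```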
@@ -391,7 +427,6 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con if err != nil { return err } - return nil } @@ -470,6 +505,10 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro RoleARN: roleARN, } + optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) { + options.CredentialSources = getCredentialSources(ctx) + }) + for _, fn := range optFns { fn(&opts) } @@ -491,6 +530,8 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro } func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { + // resolve credentials early + credentialSources := getCredentialSources(ctx) optFns := []func(*stscreds.AssumeRoleOptions){ func(options *stscreds.AssumeRoleOptions) { options.RoleSessionName = sharedCfg.RoleSessionName @@ -508,6 +549,9 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared if len(sharedCfg.MFASerial) != 0 { options.SerialNumber = aws.String(sharedCfg.MFASerial) } + + // add existing credential chain + options.CredentialSources = credentialSources }, } @@ -530,7 +574,6 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared return AssumeRoleTokenProviderNotSetError{} } } - cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) return nil @@ -564,3 +607,21 @@ func wrapWithCredentialsCache( return aws.NewCredentialsCache(provider, optFns...), nil } + +// credentialSource stores the chain of providers that was used to create an instance of +// a credentials provider on the context +type credentialSource struct{} + +func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context { + existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource) + if !ok { + existing = []aws.CredentialSource{source} + } else { + existing = append(existing, source) + } + return context.WithValue(ctx, credentialSource{}, existing) +} + +func getCredentialSources(ctx context.Context) []aws.CredentialSource { + return ctx.Value(credentialSource{}).([]aws.CredentialSource) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index c546cb7d0f..00b071fe6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -115,6 +115,14 @@ const ( requestMinCompressionSizeBytes = "request_min_compression_size_bytes" s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" + + accountIDKey = "aws_account_id" + accountIDEndpointMode = "account_id_endpoint_mode" + + requestChecksumCalculationKey = "request_checksum_calculation" + responseChecksumValidationKey = "response_checksum_validation" + checksumWhenSupported = "when_supported" + checksumWhenRequired = "when_required" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -341,6 +349,14 @@ type SharedConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. 
S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode + + // RequestChecksumCalculation indicates if the request checksum should be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + + // ResponseChecksumValidation indicates if the response checksum should be validated + ResponseChecksumValidation aws.ResponseChecksumValidation } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -1124,12 +1140,24 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) } + if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) + } + + if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err) + } + if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err) + } + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), SecretAccessKey: section.String(secretAccessKey), SessionToken: section.String(sessionTokenKey), Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + AccountID: section.String(accountIDKey), } if creds.HasKeys() { @@ -1177,6 +1205,62 @@ func updateDisableRequestCompression(disable **bool, sec ini.Section, key string return nil } +func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch v { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) + } + + return nil +} + +func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch strings.ToLower(v) { + case checksumWhenSupported: + *m = aws.RequestChecksumCalculationWhenSupported + case checksumWhenRequired: + *m = aws.RequestChecksumCalculationWhenRequired + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) + } + + return nil +} + +func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch strings.ToLower(v) { + case checksumWhenSupported: + *m = aws.ResponseChecksumValidationWhenSupported + case checksumWhenRequired: + *m = aws.ResponseChecksumValidationWhenRequired + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v) + } + + return nil +} + func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { if c.RequestMinCompressSizeBytes == nil { return 0, false, nil @@ -1191,6 
+1275,18 @@ func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, b return *c.DisableRequestCompression, true, nil } +func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + +func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) { + return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil +} + +func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) { + return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil +} + func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { if !section.Has(key) { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 3b0bad426c..d4e4090782 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,232 @@ +# v1.17.67 (2025-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.66 (2025-04-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.65 (2025-03-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.64 (2025-03-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.63 (2025-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.62 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.61 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.60 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.59 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.58 (2025-02-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.57 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.56 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.55 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.17.54 (2025-01-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.53 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.52 (2025-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.51 (2025-01-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.50 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.49 (2025-01-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.48 (2024-12-19) + +* **Bug Fix**: Fix improper use of printf-style functions. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.47 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.46 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.45 (2024-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.44 (2024-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.43 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.42 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.41 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.40 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.39 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.38 (2024-10-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.37 (2024-09-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.36 (2024-09-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.35 (2024-09-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.34 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.33 (2024-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.32 (2024-09-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.31 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.30 (2024-08-26) + +* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility. + +# v1.17.29 (2024-08-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.28 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.11 (2024-04-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go index 5c699f1665..a95e6c8bdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go @@ -47,6 +47,10 @@ type Options struct { // // If nil, the provider will default to the EC2 IMDS client. 
Client GetMetadataAPIClient + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // New returns an initialized Provider value configured to retrieve @@ -227,3 +231,11 @@ func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName str return respCreds, nil } + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceIMDS} + } // If no source has been set, assume this is used directly which means just call to assume role + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go index 9a869f8954..dc291c97cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go @@ -128,6 +128,7 @@ type GetCredentialsOutput struct { AccessKeyID string SecretAccessKey string Token string + AccountID string } // EndpointError is an error returned from the endpoint service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 0c3c4d6826..c8ac6d9ff1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -98,6 +98,10 @@ type Options struct { // // Will override AuthorizationToken if configured AuthorizationTokenProvider AuthTokenProvider + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // AuthTokenProvider defines an interface to dynamically load a value to be passed @@ -152,6 +156,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.Token, Source: ProviderName, + AccountID: resp.AccountID, } if resp.Expiration != nil { @@ -190,3 +195,13 @@ func (p *Provider) resolveAuthToken() (string, error) { return authToken, nil } + +var _ aws.CredentialProviderSource = (*Provider)(nil) + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceHTTP} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 4cb3e3039f..96ab3b85e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.11" +const goModuleVersion = "1.17.67" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index fe9345e287..dfc6b2548e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -57,6 +57,9 @@ type Provider struct { type Options struct { // Timeout limits the time a process can run. Timeout time.Duration + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // NewCommandBuilder provides the interface for specifying how command will be @@ -167,6 +170,9 @@ type CredentialProcessResponse struct { // The date on which the current credentials expire. Expiration *time.Time + + // The ID of the account for credentials + AccountID string `json:"AccountId"` } // Retrieve executes the credential process command and returns the @@ -208,6 +214,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { AccessKeyID: resp.AccessKeyID, SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.SessionToken, + AccountID: resp.AccountID, } // Handle expiration @@ -270,6 +277,14 @@ func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) return out, nil } +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceProcess} + } + return p.options.CredentialSources +} + func executeCommand(cmd *exec.Cmd, exec chan error) { // Start the command err := cmd.Start() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go index 3b97e6dd40..46ae2f9231 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -225,7 +225,7 @@ func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { } func (r *rfc3339) MarshalJSON() ([]byte, error) { - value := time.Time(*r).Format(time.RFC3339) + value := time.Time(*r).UTC().Format(time.RFC3339) // Use JSON unmarshal to unescape the quoted value making use of JSON's // quoting rules. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go index b3cf7853e7..3ed9cbb3ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -49,6 +49,10 @@ type Options struct { // Used by the SSOCredentialProvider if a token configuration // profile is used in the shared config SSOTokenProvider *SSOTokenProvider + + // The chain of providers that was used to create this provider. 
+ // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // Provider is an AWS credential provider that retrieves temporary AWS @@ -129,9 +133,18 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { CanExpire: true, Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), Source: ProviderName, + AccountID: p.options.AccountID, }, nil } +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSSO} + } + return p.options.CredentialSources +} + // InvalidTokenError is the error type that is returned if loaded token has // expired or is otherwise invalid. To refresh the SSO session run AWS SSO // login with the corresponding profile. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go index d525cac096..a469abdb79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go @@ -22,6 +22,16 @@ func (*StaticCredentialsEmptyError) Error() string { // never expire. type StaticCredentialsProvider struct { Value aws.Credentials + // These values are for reporting purposes and are not meant to be set up directly + Source []aws.CredentialSource +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource { + if s.Source == nil { + return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds + } + return s.Source } // NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index 289707b6de..1ccf71e77e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -247,6 +247,10 @@ type AssumeRoleOptions struct { // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. This parameter is optional. 
TransitiveTagKeys []string + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // NewAssumeRoleProvider constructs and returns a credentials provider that @@ -308,6 +312,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err return aws.Credentials{Source: ProviderName}, err } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + return aws.Credentials{ AccessKeyID: *resp.Credentials.AccessKeyId, SecretAccessKey: *resp.Credentials.SecretAccessKey, @@ -316,5 +325,14 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, }, nil } + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole} + } // If no source has been set, assume this is used directly which means just call to assume role + return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index ddaf6df6ce..5f4286dda4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -63,6 +64,10 @@ type WebIdentityRoleOptions struct { // want to use as managed session policies. The policies must exist in the // same account as the role. PolicyARNs []types.PolicyDescriptorType + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // IdentityTokenRetriever is an interface for retrieving a JWT @@ -135,6 +140,11 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. 
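Editor's note — a standalone sketch (not part of the vendored patch) of the account-ID extraction that now populates aws.Credentials.AccountID for assumed-role credentials; it mirrors the getAccountID helper added just below. The ARN value is hypothetical.

```go
package main

import (
	"fmt"
	"strings"
)

// accountIDFromARN returns the fifth colon-separated field of an ARN of the
// form "arn:partition:service:region:account-id:resource", or "" if the ARN
// has too few fields.
func accountIDFromARN(arn string) string {
	parts := strings.Split(arn, ":")
	if len(parts) < 5 {
		return ""
	}
	return parts[4]
}

func main() {
	fmt.Println(accountIDFromARN("arn:aws:sts::123456789012:assumed-role/MyRole/session"))
	// Output: 123456789012
}
```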
@@ -145,6 +155,27 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials Source: WebIdentityProviderName, CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, } return value, nil } + +// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" +func getAccountID(u *types.AssumedRoleUser) string { + if u.Arn == nil { + return "" + } + parts := strings.Split(*u.Arn, ":") + if len(parts) < 5 { + return "" + } + return parts[4] +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 3807833dd4..1f69e820e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,123 @@ +# v1.16.30 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.29 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.28 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.27 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.26 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.25 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.16.24 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.23 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.22 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.21 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.20 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 5642306f87..dba9ef600e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.1" +const goModuleVersion = "1.16.30" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 113b1da37a..eae3e16af7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.3.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.31 (2025-01-31) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 28c635d67f..eddabe6344 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.31" +const goModuleVersion = "1.3.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 43f6449be3..e19224f1b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -223,7 +223,17 @@ "supportsFIPS" : true }, "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "aws-iso-f-global" : { + "description" : "AWS ISOF global 
region" + }, + "us-isof-east-1" : { + "description" : "US ISOF EAST" + }, + "us-isof-south-1" : { + "description" : "US ISOF SOUTH" + } + } } ], "version" : "1.1" } \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 1cab71d505..83e5bd28a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,16 @@ +# v2.6.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.31 (2025-01-31) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 589a4a74d6..735dba7ac7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.31" +const goModuleVersion = "2.6.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index c0e54faff2..f729db535b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.8.3 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.8.2 (2025-01-24) + +* **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir + +# v1.8.1 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + # v1.8.0 (2024-02-13) * **Feature**: Bump minimum Go version to 1.20 per our language support policy. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 6e0b906c34..00df0e3cb9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.8.0" +const goModuleVersion = "1.8.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index 0f10e02283..c574867d0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,124 @@ +# v1.3.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2025-01-30) + +* **Bug Fix**: Do not sign Transfer-Encoding header in Sigv4[a]. 
Fixes a signer mismatch issue with S3 Accelerate. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.3.28 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index 51aa32cf79..b6c06c7044 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.5" +const goModuleVersion = "1.3.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go index 3487dc3352..688f834742 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go @@ -4,9 +4,10 @@ 
package v4 var IgnoredHeaders = Rules{ DenyList{ MapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + "Transfer-Encoding": struct{}{}, }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go index 516d459d5d..af4f6abcfa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" @@ -72,7 +74,11 @@ func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) { + signingTime := sdk.NowTime() + if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 { + signingTime.Add(skew) + } + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 9cf6cf22b4..c81265a25d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,31 @@ +# v1.12.3 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.12.2 (2025-01-24) + +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.12.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. + +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. + +# v1.11.5 (2024-09-20) + +* No change notes available for this release. + +# v1.11.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.11.3 (2024-06-28) + +* No change notes available for this release. + # v1.11.2 (2024-03-29) * No change notes available for this release. 
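A note on the `internal/v4a/smithy.go` hunk above: `time.Time` values are immutable, so `Add` returns a shifted copy and the adjusted time must be captured and passed to `SignHTTP` for the skew correction to take effect. Below is a minimal, self-contained sketch of that adjustment; `signingTimeWithSkew` is a hypothetical helper name, and the 30-second skew is only an illustrative stand-in for whatever the SDK's per-attempt skew tracking returns.

```go
package main

import (
	"fmt"
	"time"
)

// signingTimeWithSkew returns the wall-clock time to use for request signing,
// shifted by an estimated client/server clock skew. The result of Add must be
// assigned back because time.Time values are immutable.
func signingTimeWithSkew(now time.Time, skew time.Duration) time.Time {
	if skew != 0 {
		now = now.Add(skew)
	}
	return now
}

func main() {
	base := time.Date(2025, 2, 27, 12, 0, 0, 0, time.UTC)
	// A positive skew means the service clock is ahead of the local clock.
	fmt.Println(signingTimeWithSkew(base, 30*time.Second))
}
```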
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 6339b54191..d83e533eff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.2" +const goModuleVersion = "1.12.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 2246bd62ec..c5813c71dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,150 @@ +# v1.7.2 (2025-05-22) + +* **Bug Fix**: Handle checksum for unseekable body with 0 content length + +# v1.7.1 (2025-04-28) + +* **Bug Fix**: Don't emit warnings about lack of checksum validation for non-200 responses. + +# v1.7.0 (2025-03-11) + +* **Feature**: Add extra check during output checksum validation so the validation skip warning would not be logged if object is not fetched from s3 + +# v1.6.2 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2025-02-10) + +* **Feature**: Support CRC64NVME flex checksums. + +# v1.5.6 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.5 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.4 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.5.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.5.1 (2025-01-16) + +* **Bug Fix**: Fix nil dereference panic for operations that require checksums, but do not have an input setting for which algorithm to use. + +# v1.5.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.7 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go index a17041c35d..dab97fb22c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -9,6 +9,7 @@ import ( "fmt" "hash" "hash/crc32" + "hash/crc64" "io" "strings" "sync" @@ -30,13 +31,20 @@ const ( // AlgorithmSHA256 represents SHA256 hash algorithm AlgorithmSHA256 Algorithm = "SHA256" + + // AlgorithmCRC64NVME represents CRC64NVME hash algorithm + AlgorithmCRC64NVME Algorithm = "CRC64NVME" ) +// inverted NVME polynomial as required by crc64.MakeTable +const crc64NVME = 0x9a6c_9329_ac4b_c9b5 + var supportedAlgorithms = []Algorithm{ AlgorithmCRC32C, AlgorithmCRC32, AlgorithmSHA1, AlgorithmSHA256, + AlgorithmCRC64NVME, } func (a Algorithm) String() string { return string(a) } @@ -89,6 +97,8 @@ func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { return crc32.NewIEEE(), nil case AlgorithmCRC32C: return 
crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil + case AlgorithmCRC64NVME: + return crc64.New(crc64.MakeTable(crc64NVME)), nil default: return nil, fmt.Errorf("unknown checksum algorithm, %v", v) } @@ -106,6 +116,8 @@ func AlgorithmChecksumLength(v Algorithm) (int, error) { return crc32.Size, nil case AlgorithmCRC32C: return crc32.Size, nil + case AlgorithmCRC64NVME: + return crc64.Size, nil default: return 0, fmt.Errorf("unknown checksum algorithm, %v", v) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index 6785174da5..d4db9dd13a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.7" +const goModuleVersion = "1.7.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go index 1b727acbe1..274d649fb5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go @@ -1,6 +1,7 @@ package checksum import ( + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/smithy-go/middleware" ) @@ -14,11 +15,16 @@ type InputMiddlewareOptions struct { // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) - // Forces the middleware to compute the input payload's checksum. The - // request will fail if the algorithm is not specified or unable to compute - // the checksum. + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated RequireChecksum bool + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -46,33 +52,16 @@ type InputMiddlewareOptions struct { // AddInputMiddleware adds the middleware for performing checksum computing // of request payloads, and checksum validation of response payloads. +// +// Deprecated: This internal-only runtime API is frozen. Do not call or modify +// it in new code. Checksum-enabled service operations now generate this +// middleware setup code inline per #2507. 
func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { - // TODO ensure this works correctly with presigned URLs - - // Middleware stack: - // * (OK)(Initialize) --none-- - // * (OK)(Serialize) EndpointResolver - // * (OK)(Build) ComputeContentLength - // * (AD)(Build) Header ComputeInputPayloadChecksum - // * SIGNED Payload - If HTTP && not support trailing checksum - // * UNSIGNED Payload - If HTTPS && not support trailing checksum - // * (RM)(Build) ContentChecksum - OK to remove - // * (OK)(Build) ComputePayloadHash - // * v4.dynamicPayloadSigningMiddleware - // * v4.computePayloadSHA256 - // * v4.unsignedPayload - // (OK)(Build) Set computedPayloadHash header - // * (OK)(Finalize) Retry - // * (AD)(Finalize) Trailer ComputeInputPayloadChecksum, - // * Requires HTTPS && support trailing checksum - // * UNSIGNED Payload - // * Finalize run if HTTPS && support trailing checksum - // * (OK)(Finalize) Signing - // * (OK)(Deserialize) --none-- - // Initial checksum configuration look up middleware - err = stack.Initialize.Add(&setupInputContext{ - GetAlgorithm: options.GetAlgorithm, + err = stack.Initialize.Add(&SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, }, middleware.Before) if err != nil { return err @@ -80,8 +69,7 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) stack.Build.Remove("ContentChecksum") - inputChecksum := &computeInputPayloadChecksum{ - RequireChecksum: options.RequireChecksum, + inputChecksum := &ComputeInputPayloadChecksum{ EnableTrailingChecksum: options.EnableTrailingChecksum, EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, @@ -92,9 +80,8 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // If trailing checksum is not supported no need for finalize handler to be added. if options.EnableTrailingChecksum { - trailerMiddleware := &addInputChecksumTrailer{ + trailerMiddleware := &AddInputChecksumTrailer{ EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - RequireChecksum: inputChecksum.RequireChecksum, EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, } @@ -109,10 +96,10 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // RemoveInputMiddleware Removes the compute input payload checksum middleware // handlers from the stack. func RemoveInputMiddleware(stack *middleware.Stack) { - id := (*setupInputContext)(nil).ID() + id := (*SetupInputContext)(nil).ID() stack.Initialize.Remove(id) - id = (*computeInputPayloadChecksum)(nil).ID() + id = (*ComputeInputPayloadChecksum)(nil).ID() stack.Finalize.Remove(id) } @@ -126,6 +113,12 @@ type OutputMiddlewareOptions struct { // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation is the user config to opt-in/out response checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation + // The set of checksum algorithms that should be used for response payload // checksum validation. 
The algorithm(s) used will be a union of the // output's returned algorithms and this set. @@ -134,7 +127,7 @@ type OutputMiddlewareOptions struct { ValidationAlgorithms []string // If set the middleware will ignore output multipart checksums. Otherwise - // an checksum format error will be returned by the middleware. + // a checksum format error will be returned by the middleware. IgnoreMultipartValidation bool // When set the middleware will log when output does not have checksum or @@ -150,7 +143,9 @@ type OutputMiddlewareOptions struct { // checksum. func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { err := stack.Initialize.Add(&setupOutputContext{ - GetValidationMode: options.GetValidationMode, + GetValidationMode: options.GetValidationMode, + SetValidationMode: options.SetValidationMode, + ResponseChecksumValidation: options.ResponseChecksumValidation, }, middleware.Before) if err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go new file mode 100644 index 0000000000..861a44293b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go @@ -0,0 +1,90 @@ +package checksum + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{ + AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32, + AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C, + AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1, + AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256, + AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64, +} + +// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage +type RequestChecksumMetricsTracking struct { + RequestChecksumCalculation aws.RequestChecksumCalculation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *RequestChecksumMetricsTracking) ID() string { + return "AWSChecksum:RequestMetricsTracking" +} + +// HandleBuild checks request checksum config and checksum value sent +// and sends corresponding feature id to user agent +func (m *RequestChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.RequestChecksumCalculation { + case aws.RequestChecksumCalculationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported) + case aws.RequestChecksumCalculationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired) + } + + for algo, feat := range supportedChecksumFeatures { + checksumHeader := AlgorithmHTTPHeader(algo) + if checksum := req.Header.Get(checksumHeader); checksum != "" { + m.UserAgent.AddUserAgentFeature(feat) + } + } + + return next.HandleBuild(ctx, in) +} + +// ResponseChecksumMetricsTracking 
is the middleware to track operation response's checksum usage +type ResponseChecksumMetricsTracking struct { + ResponseChecksumValidation aws.ResponseChecksumValidation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *ResponseChecksumMetricsTracking) ID() string { + return "AWSChecksum:ResponseMetricsTracking" +} + +// HandleBuild checks the response checksum config and sends corresponding feature id to user agent +func (m *ResponseChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.ResponseChecksumValidation { + case aws.ResponseChecksumValidationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported) + case aws.ResponseChecksumValidationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired) + } + + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go index 7ffca33f0e..31853839c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go @@ -7,6 +7,7 @@ import ( "hash" "io" "strconv" + "strings" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" @@ -16,7 +17,6 @@ import ( ) const ( - contentMD5Header = "Content-Md5" streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" ) @@ -39,8 +39,8 @@ func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { m.Set(computedInputChecksumsKey{}, vs) } -// computeInputPayloadChecksum middleware computes payload checksum -type computeInputPayloadChecksum struct { +// ComputeInputPayloadChecksum middleware computes payload checksum +type ComputeInputPayloadChecksum struct { // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -49,13 +49,6 @@ type computeInputPayloadChecksum struct { // the Algorithm's header is already set on the request. EnableTrailingChecksum bool - // States that a checksum is required to be included for the operation. If - // Input does not specify a checksum, fallback to built in MD5 checksum is - // used. - // - // Replaces smithy-go's ContentChecksum middleware. - RequireChecksum bool - // Enables support for computing the SHA256 checksum of input payloads // along with the algorithm specified checksum. Prevents downstream // middleware handlers (computePayloadSHA256) re-reading the payload. @@ -78,7 +71,7 @@ type computeInputPayloadChecksum struct { type useTrailer struct{} // ID provides the middleware's identifier. 
-func (m *computeInputPayloadChecksum) ID() string { +func (m *ComputeInputPayloadChecksum) ID() string { return "AWSChecksum:ComputeInputPayloadChecksum" } @@ -98,18 +91,27 @@ func (e computeInputHeaderChecksumError) Error() string { } func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } -// HandleBuild handles computing the payload's checksum, in the following cases: +// HandleFinalize handles computing the payload's checksum, in the following cases: // - Is HTTP, not HTTPS // - RequireChecksum is true, and no checksums were specified via the Input // - Trailing checksums are not supported // // The build handler must be inserted in the stack before ContentPayloadHash // and after ComputeContentLength. -func (m *computeInputPayloadChecksum) HandleFinalize( +func (m *ComputeInputPayloadChecksum) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + var checksum string + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, err + } + if !ok { + return next.HandleFinalize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, computeInputHeaderChecksumError{ @@ -117,8 +119,6 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } } - var algorithm Algorithm - var checksum string defer func() { if algorithm == "" || checksum == "" || err != nil { return @@ -130,29 +130,14 @@ func (m *computeInputPayloadChecksum) HandleFinalize( }) }() - // If no algorithm was specified, and the operation requires a checksum, - // fallback to the legacy content MD5 checksum. - algorithm, ok, err = getInputAlgorithm(ctx) - if err != nil { - return out, metadata, err - } else if !ok { - if m.RequireChecksum { - checksum, err = setMD5Checksum(ctx, req) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to compute stream's MD5 checksum", - Err: err, - } - } - algorithm = Algorithm("MD5") + // If any checksum header is already set nothing to do. + for header := range req.Header { + h := strings.ToUpper(header) + if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") { + algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-")) + checksum = req.Header.Get(header) + return next.HandleFinalize(ctx, in) } - return next.HandleFinalize(ctx, in) - } - - // If the checksum header is already set nothing to do. - checksumHeader := AlgorithmHTTPHeader(algorithm) - if checksum = req.Header.Get(checksumHeader); checksum != "" { - return next.HandleFinalize(ctx, in) } computePayloadHash := m.EnableComputePayloadHash @@ -194,7 +179,7 @@ func (m *computeInputPayloadChecksum) HandleFinalize( // Only seekable streams are supported for non-trailing checksums, because // the stream needs to be rewound before the handler can continue. 
- if stream != nil && !req.IsStreamSeekable() { + if stream != nil && !req.IsStreamSeekable() && streamLength != 0 { return out, metadata, computeInputHeaderChecksumError{ Msg: "unseekable stream is not supported without TLS and trailing checksum", } @@ -209,14 +194,17 @@ func (m *computeInputPayloadChecksum) HandleFinalize( Err: err, } } - - if err := req.RewindStream(); err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to rewind stream", - Err: err, + // only attempt rewind if the stream length has been determined and is non-zero + if streamLength > 0 { + if err := req.RewindStream(); err != nil { + return out, metadata, computeInputHeaderChecksumError{ + Msg: "failed to rewind stream", + Err: err, + } } } + checksumHeader := AlgorithmHTTPHeader(algorithm) req.Header.Set(checksumHeader, checksum) if computePayloadHash { @@ -242,28 +230,37 @@ func (e computeInputTrailingChecksumError) Error() string { } func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } -// addInputChecksumTrailer +// AddInputChecksumTrailer adds HTTP checksum when // - Is HTTPS, not HTTP // - A checksum was specified via the Input // - Trailing checksums are supported. -type addInputChecksumTrailer struct { +type AddInputChecksumTrailer struct { EnableTrailingChecksum bool - RequireChecksum bool EnableComputePayloadHash bool EnableDecodedContentLengthHeader bool } // ID identifies this middleware. -func (*addInputChecksumTrailer) ID() string { +func (*AddInputChecksumTrailer) ID() string { return "addInputChecksumTrailer" } // HandleFinalize wraps the request body to write the trailing checksum. -func (m *addInputChecksumTrailer) HandleFinalize( +func (m *AddInputChecksumTrailer) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, computeInputTrailingChecksumError{ + Msg: "failed to get algorithm", + Err: err, + } + } else if !ok { + return next.HandleFinalize(ctx, in) + } + if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { return next.HandleFinalize(ctx, in) } @@ -281,24 +278,11 @@ func (m *addInputChecksumTrailer) HandleFinalize( } } - // If no algorithm was specified, there is nothing to do. - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to get algorithm", - Err: err, + // If any checksum header is already set nothing to do. + for header := range req.Header { + if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") { + return next.HandleFinalize(ctx, in) } - } else if !ok { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "no algorithm specified", - } - } - - // If the checksum header is already set before finalize could run, there - // is nothing to do. 
- checksumHeader := AlgorithmHTTPHeader(algorithm) - if req.Header.Get(checksumHeader) != "" { - return next.HandleFinalize(ctx, in) } stream := req.GetStream() @@ -432,7 +416,7 @@ func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayload } func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { - if v := req.ContentLength; v > 0 { + if v := req.ContentLength; v >= 0 { return v, nil } @@ -444,39 +428,3 @@ func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { return -1, nil } - -// setMD5Checksum computes the MD5 of the request payload and sets it to the -// Content-MD5 header. Returning the MD5 base64 encoded string or error. -// -// If the MD5 is already set as the Content-MD5 header, that value will be -// returned, and nothing else will be done. -// -// If the payload is empty, no MD5 will be computed. No error will be returned. -// Empty payloads do not have an MD5 value. -// -// Replaces the smithy-go middleware for httpChecksum trait. -func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) { - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return v, nil - } - stream := req.GetStream() - if stream == nil { - return "", nil - } - - if !req.IsStreamSeekable() { - return "", fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return "", err - } - if err := req.RewindStream(); err != nil { - return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err) - } - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - return string(v), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go index 3db73afe7e..3347e88cce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go @@ -3,43 +3,62 @@ package checksum import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) -// setupChecksumContext is the initial middleware that looks up the input +const ( + checksumValidationModeEnabled = "ENABLED" +) + +// SetupInputContext is the initial middleware that looks up the input // used to configure checksum behavior. This middleware must be executed before // input validation step or any other checksum middleware. -type setupInputContext struct { +type SetupInputContext struct { // GetAlgorithm is a function to get the checksum algorithm of the // input payload from the input parameters. // // Given the input parameter value, the function must return the algorithm // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) + + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequireChecksum bool + + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. 
If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation } // ID for the middleware -func (m *setupInputContext) ID() string { +func (m *SetupInputContext) ID() string { return "AWSChecksum:SetupInputContext" } // HandleInitialize initialization middleware that setups up the checksum // context based on the input parameters provided in the stack. -func (m *setupInputContext) HandleInitialize( +func (m *SetupInputContext) HandleInitialize( ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation algorithm is specified. + // nil check here is for operations that require checksum but do not have input algorithm setting if m.GetAlgorithm != nil { - // check is input resource has a checksum algorithm - algorithm, ok := m.GetAlgorithm(in.Parameters) - if ok && len(algorithm) != 0 { + if algorithm, ok := m.GetAlgorithm(in.Parameters); ok { ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) + return next.HandleInitialize(ctx, in) } } + if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported { + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) + } + return next.HandleInitialize(ctx, in) } @@ -50,6 +69,12 @@ type setupOutputContext struct { // Given the input parameter value, the function must return the validation // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation states user config to opt-in/out checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation } // ID for the middleware @@ -64,13 +89,12 @@ func (m *setupOutputContext) HandleInitialize( ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation mode is specified. 
- if m.GetValidationMode != nil { - // check is input resource has a checksum algorithm - mode, ok := m.GetValidationMode(in.Parameters) - if ok && len(mode) != 0 { - ctx = setContextOutputValidationMode(ctx, mode) - } + + mode, _ := m.GetValidationMode(in.Parameters) + + if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled { + m.SetValidationMode(in.Parameters, checksumValidationModeEnabled) + ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled) } return next.HandleInitialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go index 9fde12d86d..65dd4c1eff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go @@ -3,6 +3,7 @@ package checksum import ( "context" "fmt" + "net/http" "strings" "github.com/aws/smithy-go" @@ -55,7 +56,7 @@ func (m *validateOutputPayloadChecksum) ID() string { } // HandleDeserialize is a Deserialize middleware that wraps the HTTP response -// body with an io.ReadCloser that will validate the its checksum. +// body with an io.ReadCloser that will validate its checksum. func (m *validateOutputPayloadChecksum) HandleDeserialize( ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, ) ( @@ -66,8 +67,7 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( return out, metadata, err } - // If there is no validation mode specified nothing is supported. - if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" { + if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled { return out, metadata, err } @@ -78,6 +78,12 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( } } + // this runs BEFORE the deserializer, so we have to preemptively check for + // non-200, in which case there is no checksum to validate + if response.StatusCode != 200 { + return out, metadata, err + } + var expectedChecksum string var algorithmToUse Algorithm for _, algorithm := range m.Algorithms { @@ -90,13 +96,11 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( algorithmToUse = algorithm } - // TODO this must validate the validation mode is set to enabled. - logger := middleware.GetLogger(ctx) // Skip validation if no checksum algorithm or checksum is available. if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 { - if m.LogValidationSkipped { + if response.Body != http.NoBody && m.LogValidationSkipped { // TODO this probably should have more information about the // operation output that won't be validated. 
logger.Logf(logging.Warn, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 35c7050dd1..2b5ceb4b51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,124 @@ +# v1.12.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.12.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.7 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index daf77b5c38..a165a100f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.7" +const goModuleVersion = "1.12.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index 150e26f4e1..ff2188db8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,124 @@ +# v1.18.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.18.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index a1f30ee06d..a0129140cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.5" +const goModuleVersion = "1.18.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go index f52f2f11e9..7251588b00 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go @@ -5,6 +5,7 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -30,6 +31,8 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware ) { out, metadata, err = next.HandleDeserialize(ctx, in) + span, _ := tracing.GetSpan(ctx) + resp, ok := out.RawResponse.(*smithyhttp.Response) if !ok { // No raw response to wrap with. 
@@ -40,12 +43,14 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 { // set reqID on metadata for successful responses. awsmiddleware.SetRequestIDMetadata(&metadata, v) + span.SetProperty("aws.request_id", v) } // look up host-id if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 { // set reqID on metadata for successful responses. SetHostIDMetadata(&metadata, v) + span.SetProperty("aws.extended_request_id", v) } return out, metadata, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index ed52046889..42dae69a92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,313 @@ +# v1.80.0 (2025-05-29) + +* **Feature**: Adding checksum support for S3 PutBucketOwnershipControls API. + +# v1.79.4 (2025-05-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.79.3 (2025-04-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.79.2 (2025-04-10) + +* No change notes available for this release. + +# v1.79.1 (2025-04-03) + +* No change notes available for this release. + +# v1.79.0 (2025-03-31) + +* **Feature**: Amazon S3 adds support for S3 Access Points for directory buckets in AWS Dedicated Local Zones + +# v1.78.2 (2025-03-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.78.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.78.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.0 (2025-02-14) + +* **Feature**: Added support for Content-Range header in HeadObject response. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.1 (2025-02-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.0 (2025-02-06) + +* **Feature**: Updated list of the valid AWS Region values for the LocationConstraint parameter for general purpose buckets. + +# v1.75.4 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.3 (2025-02-04) + +* No change notes available for this release. + +# v1.75.2 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.1 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.0 (2025-01-29) + +* **Feature**: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long. + +# v1.74.1 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.74.0 (2025-01-22) + +* **Feature**: Add a client config option to disable logging when output checksum validation is skipped due to an unsupported algorithm. + +# v1.73.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.1 (2025-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.3 (2025-01-14) + +* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. + +# v1.72.2 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.1 (2025-01-08) + +* No change notes available for this release. + +# v1.72.0 (2025-01-03) + +* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change. + +# v1.71.1 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.71.0 (2024-12-03.2) + +* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets. + +# v1.70.0 (2024-12-02) + +* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.69.0 (2024-11-25) + +* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications. + +# v1.68.0 (2024-11-21) + +* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. + +# v1.67.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.67.0 (2024-11-14) + +* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas. 
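The v1.73.0 entry above describes the new default checksum behavior (CRC32 calculated by default for supporting operations, response validation attempted where possible). Below is a minimal sketch of opting back to checksums only when an operation requires them. It assumes the option names quoted in the entry (`RequestChecksumCalculation`, `ResponseChecksumValidation`) are fields on the resolved `aws.Config`; their exact placement is not shown in this patch, and the shared-config/env-var equivalents quoted in the entry are noted in comments.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// Equivalents cited in the changelog entry above:
	//   shared config: request_checksum_calculation = when_required
	//   env var:       AWS_RESPONSE_CHECKSUM_VALIDATION=when_required
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Assumed placement of the in-code options named in the entry:
	// only calculate/validate checksums when the operation requires it.
	cfg.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
	cfg.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired

	client := s3.NewFromConfig(cfg)
	_ = client
}
```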
+ +# v1.66.3 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.2 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.1 (2024-10-25) + +* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2 + +# v1.66.0 (2024-10-16) + +* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response. + +# v1.65.3 (2024-10-11) + +* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types + +# v1.65.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.1 (2024-10-07) + +* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.64.1 (2024-10-03) + +* No change notes available for this release. + +# v1.64.0 (2024-10-02) + +* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions. + +# v1.63.3 (2024-09-27) + +* No change notes available for this release. + +# v1.63.2 (2024-09-25) + +* No change notes available for this release. + +# v1.63.1 (2024-09-23) + +* No change notes available for this release. + +# v1.63.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.62.0 (2024-09-18) + +* **Feature**: Added SSE-KMS support for directory buckets. + +# v1.61.3 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.61.2 (2024-09-04) + +* No change notes available for this release. + +# v1.61.1 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.61.0 (2024-08-28) + +* **Feature**: Add presignPost for s3 PutObject + +# v1.60.1 (2024-08-22) + +* No change notes available for this release. + +# v1.60.0 (2024-08-20) + +* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs. + +# v1.59.0 (2024-08-15) + +* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.3 (2024-08-02) + +* **Bug Fix**: Add assurance tests for auth scheme selection logic. 
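The tracing and metrics features noted in the v1.63.0 and v1.65.0 entries above surface as `TracerProvider` and `MeterProvider` client options, which the `api_client.go` changes below resolve to no-op implementations when unset. A small sketch of wiring them explicitly, assuming those option fields on `s3.Options`; the no-op providers shown simply stand in for whatever real implementation (for example, an OpenTelemetry-backed adapter) a caller would supply:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/metrics"
	"github.com/aws/smithy-go/tracing"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		// Replace these no-op providers with real implementations to receive
		// the client.call.* histograms and per-operation spans emitted by the
		// instrumented client.
		o.MeterProvider = metrics.NopMeterProvider{}
		o.TracerProvider = &tracing.NopTracerProvider{}
	})
	_ = client
}
```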
+ +# v1.58.2 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.1 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.0 (2024-07-02) + +* **Feature**: Added response overrides to Head Object requests. + +# v1.57.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.57.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.56.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.2 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.1 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.0 (2024-06-05) + +* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. +* **Bug Fix**: Add S3-specific smithy protocol tests. + +# v1.54.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.3 (2024-05-23) + +* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). + +# v1.54.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.0 (2024-05-14) + +* **Feature**: Updated a few x-id in the http uri traits + +# v1.53.2 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.53.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index a31c2e0acf..40035a43da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -14,6 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" "github.com/aws/aws-sdk-go-v2/internal/v4a" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -22,22 +24,156 @@ import ( s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" 
"github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "S3" const ServiceAPIVersion = "2006-03-01" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3") +} + // Client provides the API client to make operations call for Amazon Simple // Storage Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
Provide @@ -60,6 +196,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4a(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -82,6 +222,8 @@ func New(options Options, optFns ...func(*Options)) *Client { finalizeExpressCredentials(&options, client) + initializeTimeOffsetResolver(client) + return client } @@ -94,8 +236,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -125,15 +274,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -171,7 +361,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", 
middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -259,15 +449,17 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + RequestChecksumCalculation: cfg.RequestChecksumCalculation, + ResponseChecksumValidation: cfg.ResponseChecksumValidation, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -459,6 +651,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -475,11 +691,36 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -548,6 +789,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func 
resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -568,6 +821,99 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { }) } +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{ + RequestChecksumCalculation: options.RequestChecksumCalculation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{ + ResponseChecksumValidation: options.ResponseChecksumValidation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { return 
s3shared.AddMetadataRetrieverMiddleware(stack) } @@ -601,6 +947,41 @@ func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChec } +func addInputChecksumMiddleware(stack *middleware.Stack, options internalChecksum.InputMiddlewareOptions) (err error) { + err = stack.Initialize.Add(&internalChecksum.SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, + }, middleware.Before) + if err != nil { + return err + } + + stack.Build.Remove("ContentChecksum") + + inputChecksum := &internalChecksum.ComputeInputPayloadChecksum{ + EnableTrailingChecksum: options.EnableTrailingChecksum, + EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, + EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { + return err + } + + if options.EnableTrailingChecksum { + trailerMiddleware := &internalChecksum.AddInputChecksumTrailer{ + EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, + EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, + EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(trailerMiddleware, inputChecksum.ID(), middleware.After); err != nil { + return err + } + } + + return nil +} + // ChecksumValidationMetadata contains metadata such as the checksum algorithm // used for data integrity validation. type ChecksumValidationMetadata struct { @@ -859,6 +1240,10 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op return nil } +func withNoDefaultChecksumAPIOption(options *Options) { + options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired +} + func addRequestResponseLogging(stack *middleware.Stack, o Options) error { return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ LogRequest: o.ClientLogMode.IsRequest(), @@ -896,3 +1281,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func 
(*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index c71060e082..f29c1a682a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // This operation aborts a multipart upload. After a multipart upload is aborted, @@ -18,22 +19,35 @@ import ( // by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed and prevent getting charged for the part storage, -// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// API operation and ensure that the parts list is empty. Directory buckets - For -// directory buckets, you must make requests for this API operation to the Zonal -// endpoint. These endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for the +// part storage, you should call the [ListParts]API operation and ensure that the parts list +// is empty. +// +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. To delete these in-progress multipart uploads, +// use the ListMultipartUploads operation to list the in-progress multipart +// uploads in the bucket and use the AbortMultipartUpload operation to abort all +// the in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. 
These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -41,17 +55,32 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to AbortMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to AbortMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { if params == nil { params = &AbortMultipartUploadInput{} @@ -69,31 +98,40 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the upload was taking place. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,20 +151,33 @@ type AbortMultipartUploadInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // If present, this header aborts an in progress multipart upload only if it was + // initiated on the provided timestamp. If the initiated timestamp of the multipart + // upload does not match the provided value, the operation returns a 412 + // Precondition Failed error. If the initiated timestamp matches or if the + // multipart upload doesn’t exist, the operation returns a 204 Success (No Content) + // response. + // + // This functionality is only supported for directory buckets. + IfMatchInitiatedTime *time.Time + // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -135,7 +186,12 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -187,6 +243,9 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -202,6 +261,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -235,6 +306,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index 8f89d780ee..2ddcc0eadb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -13,88 +13,138 @@ import ( smithyhttp 
"github.com/aws/smithy-go/transport/http" ) -// Completes a multipart upload by assembling previously uploaded parts. You first -// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. After successfully uploading all relevant parts of an upload, you -// call this CompleteMultipartUpload operation to complete the upload. Upon -// receiving this request, Amazon S3 concatenates all the parts in ascending order -// by part number to create a new object. In the CompleteMultipartUpload request, -// you must provide the parts list and ensure that the parts list is complete. The -// CompleteMultipartUpload API operation concatenates the parts that you provide in -// the list. For each part in the list, you must provide the PartNumber value and -// the ETag value that are returned after that part was uploaded. The processing -// of a CompleteMultipartUpload request could take several minutes to finalize. -// After Amazon S3 begins processing the request, it sends an HTTP response header -// that specifies a 200 OK response. While processing is in progress, Amazon S3 -// periodically sends white space characters to keep the connection from timing -// out. A request could fail after the initial 200 OK response has been sent. This -// means that a 200 OK response can contain either a success or an error. The -// error response might be embedded in the 200 OK response. If you call this API -// operation directly, make sure to design your application to parse the contents -// of the response and handle it appropriately. If you use Amazon Web Services -// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply -// error handling per your configuration settings (including automatically retrying -// the request as appropriate). If the condition persists, the SDKs throw an -// exception (or, for the SDKs that don't use exceptions, they return an error). +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the [UploadPart] +// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of +// an upload, you call this CompleteMultipartUpload operation to complete the +// upload. Upon receiving this request, Amazon S3 concatenates all the parts in +// ascending order by part number to create a new object. In the +// CompleteMultipartUpload request, you must provide the parts list and ensure that +// the parts list is complete. The CompleteMultipartUpload API operation +// concatenates the parts that you provide in the list. For each part in the list, +// you must provide the PartNumber value and the ETag value that are returned +// after that part was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either a +// success or an error. The error response might be embedded in the 200 OK +// response. 
If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect +// the embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). +// // Note that if CompleteMultipartUpload fails, applications should be prepared to // retry any failed requests (including 500 error responses). For more information, -// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) -// . You can't use Content-Type: application/x-www-form-urlencoded for the +// see [Amazon S3 Error Best Practices]. +// +// You can't use Content-Type: application/x-www-form-urlencoded for the // CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. For more -// information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// header, CompleteMultipartUpload can still return a 200 OK response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. 
With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted +// +// with Key Management Service, you must have permission to use the kms:Decrypt +// action for the CompleteMultipartUpload request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Special errors +// // - Error Code: EntityTooSmall +// // - Description: Your proposed upload is smaller than the minimum allowed // object size. Each part must be at least 5 MB in size, except the last part. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPart +// // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified ETag might not have matched // the uploaded part's ETag. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPartOrder +// // - Description: The list of parts was not in ascending order. The parts list // must be specified in order by part number. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to CompleteMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to CompleteMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { if params == nil { params = &CompleteMultipartUploadInput{} @@ -112,31 +162,40 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. 
Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -153,37 +212,98 @@ type CompleteMultipartUploadInput struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. You can use this header as a data integrity check to verify + // that the checksum type that is received is the same checksum that was specified. + // If the checksum type doesn’t match the checksum type that was specified for the + // object during the CreateMultipartUpload request, it’ll result in a BadDigest + // error. For more information, see Checking object integrity in the Amazon S3 User + // Guide. + ChecksumType types.ChecksumType + // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // Uploads the object only if the ETag (entity tag) value provided during the + // WRITE operation matches the ETag of the object in S3. If the ETag values do not + // match, the operation returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should fetch the + // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and + // re-upload each part. + // + // Expects the ETag value as a string. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // Uploads the object only if the object key name does not already exist in the + // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should re-initiate the + // multipart upload with CreateMultipartUpload and re-upload each part. + // + // Expects the '*' (asterisk) character. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfNoneMatch *string + + // The expected total object size of the multipart upload request. If there’s a + // mismatch between the specified object size value and the actual object size + // value, it results in an HTTP 400 InvalidRequest error. + MpuObjectSize *int64 + // The container for the multipart upload request information. MultipartUpload *types.CompletedMultipartUpload @@ -191,38 +311,47 @@ type CompleteMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is required only when the object was created using a checksum // algorithm or if your bucket policy requires the use of SSE-C. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -231,63 +360,90 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // the access point ARN or access point alias if used. 
+ // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // as a data integrity check to verify that the checksum type that is received is + // the same checksum type that was specified during the CreateMultipartUpload + // request. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Entity tag that identifies the newly created object's data. 
Objects with // different object data will have different entity tags. The entity tag is an // opaque string. The entity tag may or may not be an MD5 digest of the object // data. If the entity tag is not an MD5 digest of the object data, it will contain // one or more nonhexadecimal characters and/or will consist of less than 32 or // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ETag *string // If the object expiration is configured, this will contain the expiration date ( // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // // This functionality is not supported for directory buckets. Expiration *string @@ -298,21 +454,25 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning - // turned on. This functionality is not supported for directory buckets. + // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
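As a rough usage sketch (not part of the vendored diff; bucket, key, upload ID, and the part ETag below are placeholders), the conditional-write support that this revision adds to CompleteMultipartUploadInput could be exercised with the v2 client roughly like this:

    package main

    import (
    	"context"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    	"github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    func main() {
    	ctx := context.Background()

    	// Load the default credential/region chain and build an S3 client.
    	cfg, err := config.LoadDefaultConfig(ctx)
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := s3.NewFromConfig(cfg)

    	out, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
    		Bucket:   aws.String("example-bucket"),    // placeholder
    		Key:      aws.String("example-key"),       // placeholder
    		UploadId: aws.String("example-upload-id"), // placeholder
    		MultipartUpload: &types.CompletedMultipartUpload{
    			Parts: []types.CompletedPart{
    				{ETag: aws.String("\"etag-of-part-1\""), PartNumber: aws.Int32(1)},
    			},
    		},
    		// New in this revision: only create the object if the key does not
    		// already exist; otherwise S3 returns 412 Precondition Failed.
    		IfNoneMatch: aws.String("*"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("completed object ETag: %s", aws.ToString(out.ETag))
    }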
@@ -364,6 +524,9 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -379,6 +542,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -415,6 +590,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index c7990bab1d..912fffb6f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -15,102 +15,155 @@ import ( "time" ) -// Creates a copy of an object that is already stored in Amazon S3. You can store -// individual objects of up to 5 TB in Amazon S3. You create a copy of your object -// up to 5 GB in size in a single atomic action using this API. However, to copy an -// object greater than 5 GB, you must use the multipart upload Upload Part - Copy -// (UploadPartCopy) API. For more information, see Copy Object Using the REST -// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) -// . You can copy individual objects between general purpose buckets, between +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a copy +// of your object up to 5 GB in size in a single atomic action using this API. +// However, to copy an object greater than 5 GB, you must use the multipart upload +// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. +// +// You can copy individual objects between general purpose buckets, between // directory buckets, and between general purpose buckets and directory buckets. -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. 
Both the Region that you want to copy the object -// from and the Region that you want to copy the object to must be enabled for your -// account. For more information about how to enable a Region for your account, see -// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) -// in the Amazon Web Services Account Management Guide. Amazon S3 transfer -// acceleration does not support cross-Region copies. If you request a cross-Region -// copy using a transfer acceleration endpoint, you get a 400 Bad Request error. -// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . Authentication and authorization All CopyObject requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see REST -// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use the IAM credentials to authenticate and +// +// - Amazon S3 supports copy operations using Multi-Region Access Points only as +// a destination when using the Multi-Region Access Point ARN. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// - VPC endpoints don't support cross-Region requests (including copies). If +// you're using VPC endpoints, your source and destination buckets should be in the +// same Amazon Web Services Region as your VPC endpoint. +// +// Both the Region that you want to copy the object from and the Region that you +// want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon +// Web Services Account Management Guide. +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If you +// request a cross-Region copy using a transfer acceleration endpoint, you get a +// 400 Bad Request error. For more information, see [Transfer Acceleration]. +// +// Authentication and authorization All CopyObject requests must be authenticated +// and signed by using IAM credentials (access key ID and secret access key for the +// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source +// , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use the IAM credentials to authenticate and // authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. Amazon -// Web Services CLI or SDKs handles authentication and authorization on your -// behalf. 
Permissions You must have read access to the source object and write -// access to the destination bucket. +// temporary security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// +// Permissions You must have read access to the source object and write access to +// the destination bucket. +// // - General purpose bucket permissions - You must have permissions in an IAM // policy based on the source and destination bucket types in a CopyObject // operation. +// // - If the source object is in a general purpose bucket, you must have // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have // s3:PutObject permission to write the object copy to the destination bucket. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in a CopyObject operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a // policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket. For example policies, see -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination bucket. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Response and special errors When the request is an HTTP 1.1 request, the // response is chunk encoded. When the request is not an HTTP 1.1 request, the // response would not contain the Content-Length . You always need to read the -// entire response body to check if the copy succeeds. to keep the connection alive -// while we copy the data. +// entire response body to check if the copy succeeds. +// // - If the copy is successful, you receive a response with information about // the copied object. +// // - A copy request might return an error when Amazon S3 receives the copy // request or while Amazon S3 is copying the files. A 200 OK response can contain // either a success or an error. +// // - If the error occurs before the copy action starts, you receive a standard // Amazon S3 error. 
+// // - If the error occurs during the copy operation, the error response is // embedded in the 200 OK response. For example, in a cross-region copy, you may -// encounter throttling and receive a 200 OK response. For more information, see -// Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror) +// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] // . The 200 OK status code means the copy was accepted, but it doesn't mean the // copy is complete. Another example is when you disconnect from Amazon S3 before // the copy is complete, Amazon S3 might cancel the copy and you may receive a // 200 OK response. You must stay connected to Amazon S3 until the entire -// response is successfully received and processed. If you call this API operation -// directly, make sure to design your application to parse the content of the -// response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs -// handle this condition. The SDKs detect the embedded error and apply error -// handling per your configuration settings (including automatically retrying the -// request as appropriate). If the condition persists, the SDKs throw an exception -// (or, for the SDKs that don't use exceptions, they return an error). +// response is successfully received and processed. +// +// If you call this API operation directly, make sure to design your application +// +// to parse the content of the response and handle it appropriately. If you use +// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the +// embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). // // Charge The copy request charge is based on the storage class and Region that // you specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data // retrieval. If the copy source is in a different region, the data transfer is -// billed to the copy source account. For pricing information, see Amazon S3 -// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory -// buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CopyObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. +// +// HTTP Host header syntax +// +// - Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// - Amazon S3 on Outposts - When you use this action with S3 on Outposts +// through the REST API, you must direct requests to the S3 on Outposts hostname. +// The S3 on Outposts hostname takes the form +// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The +// hostname isn't required when you use the Amazon Web Services CLI or SDKs. 
+// +// The following operations are related to CopyObject : +// +// [PutObject] +// +// [GetObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror +// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { if params == nil { params = &CopyObjectInput{} @@ -128,31 +181,52 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. 
S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the destination bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must use the + // Outpost bucket access point ARN or the access point alias for the destination + // bucket. + // + // You can only copy objects within the same Outpost bucket. It's not supported to + // copy objects across different Amazon Web Services Outposts, between buckets on + // the same Outposts, or between Outposts buckets and any other bucket types. For + // more information about S3 on Outposts, see [What is S3 on Outposts?]in the S3 on Outposts guide. When + // you use this action with S3 on Outposts through the REST API, you must direct + // requests to the S3 on Outposts hostname, in the format + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The + // hostname isn't required when you use the Amazon Web Services CLI or SDKs. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -160,10 +234,11 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. The source object can be up // to 5 GB. If the source object is an object that was uploaded by using a // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. You specify the value of the copy - // source in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending on + // whether you want to access the source object through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and the key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the general purpose @@ -172,6 +247,7 @@ type CopyObjectInput struct { // bucket awsexamplebucket--use1-az5--x-s3 , use // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be // URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -179,15 +255,20 @@ type CopyObjectInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your source bucket versioning is enabled, the x-amz-copy-source header by // default identifies the current version of an object to copy. If the current // version is a delete marker, Amazon S3 behaves as if the object was deleted. 
To @@ -195,14 +276,21 @@ type CopyObjectInput struct { // append ?versionId= to the value (for example, // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. If you enable versioning on the destination bucket, Amazon S3 - // generates a unique version ID for the copied object. This version ID is - // different from the version ID of the source object. Amazon S3 returns the - // version ID of the copied object in the x-amz-version-id response header in the - // response. If you do not enable versioning or suspend it on the destination - // bucket, the version ID that Amazon S3 generates in the x-amz-version-id - // response header is always null. Directory buckets - S3 Versioning isn't enabled - // and supported for directory buckets. + // source object. + // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from the + // version ID of the source object. Amazon S3 returns the version ID of the copied + // object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, the + // version ID that Amazon S3 generates in the x-amz-version-id response header is + // always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -212,51 +300,71 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned access control list (ACL) to apply to the object. When you copy an - // object, the ACL metadata is not preserved and is set to private by default. - // Only the owner has full access control. To override the default ACL setting, - // specify a new ACL when you generate a copy request. For more information, see - // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . If the destination bucket that you're copying objects to uses the bucket owner + // The canned access control list (ACL) to apply to the object. + // + // When you copy an object, the ACL metadata is not preserved and is set to private + // by default. Only the owner has full access control. To override the default ACL + // setting, specify a new ACL when you generate a copy request. For more + // information, see [Using ACLs]. + // + // If the destination bucket that you're copying objects to uses the bucket owner // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect // permissions. Buckets that use this setting only accept PUT requests that don't // specify an ACL or PUT requests that specify bucket owner full control ACLs, // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 + // User Guide. 
+ // // - If your destination bucket uses the bucket owner enforced setting for // Object Ownership, all objects written to the bucket by any account will be owned // by the bucket owner. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + // // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. For more information, see - // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // affect bucket-level settings for S3 Bucket Key. + // + // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + // + // Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS + // encrypted objects from general purpose buckets to directory buckets, from + // directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request + // is made for a KMS-encrypted object. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html BucketKeyEnabled *bool // Specifies the caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. When you copy an object, if the source object has a - // checksum, that checksum value will be copied to the new object by default. If - // the CopyObject request does not include this x-amz-checksum-algorithm header, - // the checksum algorithm will be copied from the source object to the destination - // object (if it's present on the source object). You can optionally specify a - // different checksum algorithm to use with the x-amz-checksum-algorithm header. - // Unrecognized or unsupported values will respond with the HTTP status code 400 - // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, - // CRC32 is the default checksum algorithm that's used for performance. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. 
If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's present + // on the source object). You can optionally specify a different checksum algorithm + // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported + // values will respond with the HTTP status code 400 Bad Request . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Specifies presentational information for the object. Indicates whether an @@ -266,8 +374,10 @@ type CopyObjectInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language the content is in. @@ -276,62 +386,85 @@ type CopyObjectInput struct { // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both the - // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers - // are present in the request and evaluate as follows, Amazon S3 returns the 412 - // Precondition Failed response code: - // - x-amz-copy-source-if-none-match condition evaluates to false - // - x-amz-copy-source-if-modified-since condition evaluates to true - CopySourceIfModifiedSince *time.Time - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both the x-amz-copy-source-if-none-match and + // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request and // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // + // - x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfModifiedSince *time.Time + + // Copies the object if its entity tag (ETag) is different than the specified ETag. 
+ // + // If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // + // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, - // you must provide the necessary encryption information in your request so that - // Amazon S3 can decrypt the object for copying. This functionality is not - // supported when the source object is in a directory bucket. + // AES256 ). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. If the source object for - // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary - // encryption information in your request so that Amazon S3 can decrypt the object - // for copying. This functionality is not supported when the source object is in a - // directory bucket. + // one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. If the source object for the copy - // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption - // information in your request so that Amazon S3 can decrypt the object for - // copying. This functionality is not supported when the source object is in a - // directory bucket. + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. 
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -348,22 +481,30 @@ type CopyObjectInput struct { Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string @@ -373,26 +514,32 @@ type CopyObjectInput struct { // Specifies whether the metadata is copied from the source object or replaced // with metadata that's provided in the request. When copying an object, you can // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. General purpose bucket - For - // general purpose buckets, when you grant permissions, you can use the - // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior - // when objects are uploaded. For more information, see Amazon S3 condition key - // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) - // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each - // object and is not copied when using the x-amz-metadata-directive header. To - // copy the value, you must specify x-amz-website-redirect-location in the request - // header. + // isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant + // permissions, you can use the s3:x-amz-metadata-directive condition key to + // enforce certain metadata behavior when objects are uploaded. For more + // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied when + // using the x-amz-metadata-directive header. To copy the value, you must specify + // x-amz-website-redirect-location in the request header. + // + // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the object copy. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the object copy. This - // functionality is not supported for directory buckets. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. 
ObjectLockMode types.ObjectLockMode // The date and time when you want the Object Lock of the object copy to expire. + // // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time @@ -400,19 +547,23 @@ type CopyObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // When you perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate // encryption-related headers to encrypt the target object with an Amazon S3 // managed key, a KMS key, or a customer-provided key. If the encryption setting in // your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. + // // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerAlgorithm *string @@ -421,81 +572,155 @@ type CopyObjectInput struct { // encrypting data. This value is used to store the object and then it is // discarded. Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported when the destination bucket is a directory bucket. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value must be explicitly - // added to specify encryption context for CopyObject requests. 
This functionality - // is not supported when the destination bucket is a directory bucket. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for the destination object encryption. The value of + // this header is a base64-encoded UTF-8 string holding JSON with the encryption + // context key-value pairs. + // + // General purpose buckets - This value must be explicitly added to specify + // encryption context for CopyObject requests if you want an additional encryption + // context for your destination object. The additional encryption context of the + // source object won't be copied to the destination object. For more information, + // see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. + // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object // encryption. All GET and PUT requests for an object protected by KMS will fail if // they're not made via SSL or using SigV4. For information about configuring any // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported - // values won’t write a destination object and will receive a 400 Bad Request - // response. Amazon S3 automatically encrypts all new objects that are copied to an - // S3 bucket. 
When copying an object, if you don't specify encryption information - // in your copy request, the encryption setting of the target object is set to the + // The server-side encryption algorithm used when storing this object in Amazon + // S3. Unrecognized or unsupported values won’t write a destination object and will + // receive a 400 Bad Request response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information in + // your copy request, the encryption setting of the target object is set to the // default encryption configuration of the destination bucket. By default, all // buckets have a base level of encryption configuration that uses server-side // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a - // default encryption configuration that uses server-side encryption with Key - // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with - // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with - // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS - // key, or a customer-provided key to encrypt the target object copy. When you - // perform a CopyObject operation, if you want to use a different type of - // encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // With server-side encryption, Amazon S3 encrypts your data as it writes your data - // to disks in its data centers and decrypts the data when you access it. For more - // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) - // in the Amazon S3 User Guide. For directory buckets, only server-side encryption - // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // different default encryption configuration, Amazon S3 uses the corresponding + // encryption key to encrypt the target object copy. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. For + // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + // + // General purpose buckets + // + // - For general purpose buckets, there are the following supported options for + // server-side encryption: server-side encryption with Key Management Service (KMS) + // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS + // keys (DSSE-KMS), and server-side encryption with customer-provided encryption + // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided + // key to encrypt the target object copy. + // + // - When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. 
If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // + // Directory buckets + // + // - For directory buckets, there are only two supported options for server-side + // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( + // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We + // recommend that the bucket's default encryption uses the desired encryption + // configuration and you don't override the bucket default encryption in your + // CreateSession requests or PUT object requests. Then, new objects are + // automatically encrypted with the desired encryption settings. For more + // information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the + // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // - To encrypt new object copies to a directory bucket with SSE-KMS, we + // recommend you specify SSE-KMS as the directory bucket's default encryption + // configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key]( aws/s3 ) isn't + // supported. Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket + // for the lifetime of the bucket. After you specify a customer managed key for + // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS + // configuration. Then, when you perform a CopyObject operation and want to + // specify server-side encryption settings for new object copies with SSE-KMS in + // the encryption-related request headers, you must ensure the encryption key is + // the same customer managed key that you specified for the directory bucket's + // default encryption configuration. + // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk ServerSideEncryption types.ServerSideEncryption // If the x-amz-storage-class header is not used, the copied object will be stored // in the STANDARD Storage Class by default. The STANDARD storage class provides // high durability and high availability. Depending on performance needs, you can // specify a different Storage Class. - // - Directory buckets - For directory buckets, only the S3 Express One Zone - // storage class is supported to store newly created objects. Unsupported storage - // class values won't write a destination object and will respond with the HTTP - // status code 400 Bad Request . + // + // - Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. 
Unsupported + // storage class values won't write a destination object and will respond with the + // HTTP status code 400 Bad Request . + // // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // // You can use the CopyObject action to change the storage class of an object that // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. Before using an object as a source object for the - // copy operation, you must restore a copy of it if it meets any of the following - // conditions: + // more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 - // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) - // is Archive Access or Deep Archive Access . - // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) - // in the Amazon S3 User Guide. + // + // - The storage class of the source object is INTELLIGENT_TIERING and it's [S3 Intelligent-Tiering access tier]is + // Archive Access or Deep Archive Access . + // + // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html + // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition StorageClass types.StorageClass // The tag-set for the object copy in the destination bucket. This value must be @@ -503,60 +728,82 @@ type CopyObjectInput struct { // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive // , you don't need to set the x-amz-tagging header, because the tag-set will be // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. The default value is the empty value. Directory buckets - For - // directory buckets in a CopyObject operation, only the empty tag-set is - // supported. Any requests that attempt to write non-empty tags into directory - // buckets will receive a 501 Not Implemented status code. When the destination - // bucket is a directory bucket, you will receive a 501 Not Implemented response - // in any of the following situations: + // parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. 
When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. The default value is - // COPY . Directory buckets - For directory buckets in a CopyObject operation, - // only the empty tag-set is supported. Any requests that attempt to write - // non-empty tags into directory buckets will receive a 501 Not Implemented status - // code. When the destination bucket is a directory bucket, you will receive a 501 - // Not Implemented response in any of the following situations: + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY . + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. 
+ // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. @@ -567,71 +814,82 @@ type CopyObjectInput struct { // Amazon S3 stores the value of this header in the object metadata. This value is // unique to each object and is not copied when using the x-amz-metadata-directive // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. This functionality is not supported for - // directory buckets. + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.CopySource = in.CopySource + p.Key = in.Key p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version ID of the source object that was copied. This functionality is not - // supported when the source object is in a directory bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. This - // functionality is not supported for directory buckets. + // If the object expiration is configured, the response includes this header. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. 
This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. This functionality is not supported for - // directory buckets. + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -683,6 +941,9 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -698,6 +959,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopyObjectValidationMiddleware(stack); err != nil { return err } @@ -734,6 +1007,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index b39244bcfe..5dfadb4f73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -15,89 +15,118 @@ import ( ) // This action creates an Amazon S3 bucket. 
To create an Amazon S3 on Outposts -// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) -// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and -// have a valid Amazon Web Services Access Key ID to authenticate requests. -// Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. There are two types of buckets: general purpose -// buckets and directory buckets. For more information about these bucket types, -// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// in the Amazon S3 User Guide. +// bucket, see [CreateBucket]CreateBucket . +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have +// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you become +// the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. +// // - General purpose buckets - If you send your CreateBucket request to the // s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So // the signature calculations in Signature Version 4 must use us-east-1 as the // Region, even if the location constraint in the request specifies another Region // where the bucket is to be created. If you create a bucket in a Region other than // US East (N. Virginia), your application must be able to handle 307 redirect. For -// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) -// in the Amazon S3 User Guide. +// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - In addition to the s3:CreateBucket // permission, the following permissions are required in a policy when your // CreateBucket request includes specific headers: +// // - Access control lists (ACLs) - In your CreateBucket request, if you specify // an access control list (ACL) and set it to public-read , public-read-write , // authenticated-read , or if you explicitly specify any other custom ACLs, both // s3:CreateBucket and s3:PutBucketAcl permissions are required. 
In your // CreateBucket request, if you set the ACL to private , or if you don't specify // any ACLs, only the s3:CreateBucket permission is required. +// // - Object Lock - In your CreateBucket request, if you set // x-amz-bucket-object-lock-enabled to true, the // s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are // required. +// // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. To set an ACL on a bucket as part of a CreateBucket -// request, you must explicitly set S3 Object Ownership for the bucket to a -// different value than the default, BucketOwnerEnforced . Additionally, if your -// desired bucket ACL grants public access, you must first create the bucket -// (without the bucket ACL) and then explicitly disable Block Public Access on the -// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket -// with a public ACL, the request will fail. For the majority of modern use cases -// in S3, we recommend that you keep all Block Public Access settings enabled and -// keep ACLs disabled. If you would like to share data with users outside of your -// account, you can use bucket policies as needed. For more information, see -// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. -// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about S3 Block Public Access, see Blocking -// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object -// Ownership, and S3 Block Public Access are not supported for directory buckets. -// For directory buckets, all Block Public Access settings are enabled at the -// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs -// disabled). These settings can't be modified. For more information about -// permissions for creating and working with directory buckets, see Directory -// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. 
For more information about supported S3 features -// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) -// in the Amazon S3 User Guide. +// permission is required. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// CreateBucket : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly +// +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and then +// explicitly disable Block Public Access on the bucket before using PutBucketAcl +// to set the ACL. If you try to create a bucket with a public ACL, the request +// will fail. +// +// For the majority of modern use cases in S3, we recommend that you keep all +// +// Block Public Access settings enabled and keep ACLs disabled. If you would like +// to share data with users outside of your account, you can use bucket policies as +// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the +// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block +// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public +// +// Access are not supported for directory buckets. For directory buckets, all Block +// Public Access settings are enabled at the bucket level and S3 Object Ownership +// is set to Bucket owner enforced (ACLs disabled). These settings can't be +// modified. +// +// For more information about permissions for creating and working with directory +// +// buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about +// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
+// +// The following operations are related to CreateBucket : +// +// [PutObject] +// +// [DeleteBucket] +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html +// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { params = &CreateBucketInput{} @@ -115,77 +144,100 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. General purpose buckets - For information - // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) - // in the Amazon S3 User Guide. Directory buckets - When you use this operation - // with a directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). 
Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. This functionality is not supported for - // directory buckets. + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for directory buckets. + // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for directory buckets. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // directory buckets. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. This functionality is not supported for directory buckets. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. + // + // This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for directory buckets. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. 
The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde } func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) p.DisableAccessPoints = ptr.Bool(true) @@ -245,6 +297,9 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -260,6 +315,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { return err } @@ -293,6 +360,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go new file mode 100644 index 0000000000..fbac4458fb --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go @@ -0,0 +1,293 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a metadata table configuration for a general purpose bucket. For more +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the following permissions. For +// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. +// +// If you also want to integrate your table bucket with Amazon Web Services +// analytics services so that you can query your metadata table, you need +// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. +// +// - s3:CreateBucketMetadataTableConfiguration +// +// - s3tables:CreateNamespace +// +// - s3tables:GetTable +// +// - s3tables:CreateTable +// +// - s3tables:PutTablePolicy +// +// The following operations are related to CreateBucketMetadataTableConfiguration : +// +// [DeleteBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html +func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &CreateBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to create the metadata table + // configuration in. + // + // This member is required. + Bucket *string + + // The contents of your metadata table configuration. + // + // This member is required. + MetadataTableConfiguration *types.MetadataTableConfiguration + + // The checksum algorithm to use with your metadata table configuration. 
+ ChecksumAlgorithm types.ChecksumAlgorithm + + // The Content-MD5 header for the metadata table configuration. + ContentMD5 *string + + // The expected owner of the general purpose bucket that contains your metadata + // table configuration. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type CreateBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = 
addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateBucketMetadataTableConfiguration", + } +} + +// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the +// request checksum algorithm value provided as input. +func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember, + RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index c083c32d8e..4fc77ed385 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -16,51 +16,54 @@ import ( // This action initiates a multipart upload and returns an upload ID. This upload // ID is used to associate all of the parts in the specific multipart upload. You -// specify this upload ID in each of your subsequent upload part requests (see -// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// ). You also include this upload ID in the final request to either complete or -// abort the multipart upload request. For more information about multipart -// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide. After you initiate a multipart upload and upload -// one or more parts, to stop being charged for storing the uploaded parts, you -// must either complete or abort the multipart upload. Amazon S3 frees up the space -// used to store the parts and stops charging you for storing them only after you -// either complete or abort a multipart upload. If you have configured a lifecycle -// rule to abort incomplete multipart uploads, the created multipart upload must be -// completed within the number of days specified in the bucket lifecycle -// configuration. Otherwise, the incomplete multipart upload becomes eligible for -// an abort action and Amazon S3 aborts the multipart upload. For more information, -// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// . +// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). +// You also include this upload ID in the final request to either complete or abort +// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or abort +// the multipart upload. Amazon S3 frees up the space used to store the parts and +// stops charging you for storing them only after you either complete or abort a +// multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts the +// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. +// // - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Request signing For request signing, multipart upload is just a series of // regular requests. You initiate a multipart upload, send one or more requests to // upload parts, and then complete the multipart upload process. You sign each // request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see Authenticating Requests -// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information about the permissions -// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. To perform a multipart upload with encryption by -// using an Amazon Web Services KMS key, the requester must have permission to the -// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are -// required because Amazon S3 must decrypt and read data from the encrypted file -// parts before it completes the multipart upload. For more information, see -// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using an Key Management Service (KMS) KMS key, the requester must +// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. +// The requester must also have permissions for the kms:GenerateDataKey action +// for the CreateMultipartUpload API. Then, the requester needs permissions for +// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These +// permissions are required because Amazon S3 must decrypt and read data from the +// encrypted file parts before it completes the multipart upload. For more +// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. 
Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -68,10 +71,10 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Encryption +// // - General purpose buckets - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. Amazon S3 automatically encrypts all new @@ -91,61 +94,135 @@ import ( // in your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. If // you choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the CreateMultipartUpload request. +// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload +// request. +// // - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( // aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) // – If you want Amazon Web Services to manage the keys used to encrypt data, // specify the following headers in the request. +// // - x-amz-server-side-encryption +// // - x-amz-server-side-encryption-aws-kms-key-id +// // - x-amz-server-side-encryption-context +// // - If you specify x-amz-server-side-encryption:aws:kms , but don't provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 key) in KMS to protect the data. +// // - To perform a multipart upload with encryption by using an Amazon Web // Services KMS key, the requester must have permission to the kms:Decrypt and // kms:GenerateDataKey* actions on the key. These permissions are required // because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see Multipart -// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in +// the Amazon S3 User Guide. 
+// // - If your Identity and Access Management (IAM) user or role is in the same // Amazon Web Services account as the KMS key, then you must have these permissions // on the key policy. If your IAM user or role is in a different account from the // key, then you must have the permissions on both the key policy and your IAM user // or role. +// // - All GET and PUT requests for an object protected by KMS fail if you don't // make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), // or Signature Version 4. For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see -// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) -// in the Amazon S3 User Guide. For more information about server-side -// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with customer-provided encryption keys (SSE-C), see -// Protecting data using server-side encryption with customer-provided encryption -// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// - Directory buckets -For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the +// Amazon S3 User Guide. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CreateMultipartUpload : -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] +// +// in the Amazon S3 User Guide. +// +// - Use customer-provided encryption keys (SSE-C) – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about server-side encryption with customer-provided +// +// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. 
+// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the +// +// encryption request headers must match the encryption settings that are specified +// in the CreateSession request. You can't override the values of the encryption +// settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the +// CreateSession request. You don't need to explicitly specify these encryption +// settings values in Zonal endpoint API calls, and Amazon S3 will use the +// encryption settings values from the CreateSession request to protect new +// objects in the directory bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption +// request headers must match the default encryption configuration of the directory +// bucket. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to CreateMultipartUpload : +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { if params == nil { params = &CreateMultipartUploadInput{} @@ -164,30 +241,40 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. 
Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // object is uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -199,41 +286,67 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. Amazon S3 supports a set of predefined // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. By default, all objects are private. Only the owner - // has full access control. When uploading an object, you can grant access - // permissions to individual Amazon Web Services accounts or to predefined groups - // defined by Amazon S3. These permissions are then added to the access control - // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . One way to grant the permissions using the request headers is to specify a - // canned ACL with the x-amz-acl request header. + // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual Amazon + // Web Services accounts or to predefined groups defined by Amazon S3. These + // permissions are then added to the access control list (ACL) on the new object. + // For more information, see [Using ACLs]. One way to grant the permissions using the request + // headers is to specify a canned ACL with the x-amz-acl request header. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with an object action doesn’t - // affect bucket-level settings for S3 Bucket Key. This functionality is not - // supported for directory buckets. + // + // General purpose buckets - Setting this header to true causes Amazon S3 to use + // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + // + // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. 
S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language that the content is in. @@ -251,213 +364,386 @@ type CreateMultipartUploadInput struct { Expires *time.Time // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. By default, all objects are private. Only - // the owner has full access control. When uploading an object, you can use this - // header to explicitly grant access permissions to specific Amazon Web Services - // accounts or groups. This header maps to specific permissions that Amazon S3 - // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. 
You specify each grantee as a type=value pair, - // where the type is one of the following: - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - uri – if you are granting permissions to a predefined group - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: - // - US East (N. Virginia) - // - US West (N. California) - // - US West (Oregon) - // - Asia Pacific (Singapore) - // - Asia Pacific (Sydney) - // - Asia Pacific (Tokyo) - // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" - // - This functionality is not supported for directory buckets. - // - This functionality is not supported for Amazon S3 on Outposts. - GrantFullControl *string - - // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. By default, all objects are private. Only the owner has full - // access control. When uploading an object, you can use this header to explicitly - // grant access permissions to specific Amazon Web Services accounts or groups. - // This header maps to specific permissions that Amazon S3 supports in an ACL. For - // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: - // - id – if the value specified is the canonical user ID of an Amazon Web - // Services account - // - uri – if you are granting permissions to a predefined group - // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: - // - US East (N. Virginia) - // - US West (N. California) - // - US West (Oregon) - // - Asia Pacific (Singapore) - // - Asia Pacific (Sydney) - // - Asia Pacific (Tokyo) - // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. - // For example, the following x-amz-grant-read header grants the Amazon Web - // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" - // - This functionality is not supported for directory buckets. - // - This functionality is not supported for Amazon S3 on Outposts. - GrantRead *string - - // Specify access permissions explicitly to allows grantee to read the object ACL. + // WRITE_ACP permissions on the object. + // // By default, all objects are private. Only the owner has full access control. 
// When uploading an object, you can use this header to explicitly grant access // permissions to specific Amazon Web Services accounts or groups. This header maps // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + GrantFullControl *string + + // Specify access permissions explicitly to allow grantee to read the object data + // and its metadata. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. 
+ // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // + // - uri – if you are granting permissions to a predefined group + // + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // + // - US East (N. Virginia) + // + // - US West (N. California) + // + // - US West (Oregon) + // + // - Asia Pacific (Singapore) + // + // - Asia Pacific (Sydney) + // + // - Asia Pacific (Tokyo) + // + // - Europe (Ireland) + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // - This functionality is not supported for directory buckets. + // + // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + GrantRead *string + + // Specify access permissions explicitly to allows grantee to read the object ACL. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // + // - uri – if you are granting permissions to a predefined group + // + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // + // - US East (N. Virginia) + // + // - US West (N. California) + // + // - US West (Oregon) + // + // - Asia Pacific (Singapore) + // + // - Asia Pacific (Sydney) + // + // - Asia Pacific (Tokyo) + // + // - Europe (Ireland) + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // - This functionality is not supported for directory buckets. + // + // - This functionality is not supported for Amazon S3 on Outposts. 
+ // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantReadACP *string // Specify access permissions explicitly to allows grantee to allow grantee to - // write the ACL for the applicable object. By default, all objects are private. - // Only the owner has full access control. When uploading an object, you can use - // this header to explicitly grant access permissions to specific Amazon Web - // Services accounts or groups. This header maps to specific permissions that - // Amazon S3 supports in an ACL. For more information, see Access Control List - // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // write the ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether you want to apply a legal hold to the uploaded object. 
This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Specifies the Object Lock mode that you want to apply to the uploaded object. + // // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // Specifies the date and time when you want the Object Lock to expire. This - // functionality is not supported for directory buckets. + // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. This - // functionality is not supported for directory buckets. + // to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This functionality is not - // supported for directory buckets. + // encryption. The value of this header is a Base64 encoded string of a UTF-8 + // encoded JSON, which contains the encryption context as key-value pairs. 
+ // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. SSEKMSEncryptionContext *string - // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption - // customer managed key to use for object encryption. This functionality is not - // supported for directory buckets. + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or + // x-amz-server-side-encryption:aws:kms:dsse , but do not provide + // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. 
You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. + // + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone + // storage class) in Availability Zones and ONEZONE_IA (the S3 One + // Zone-Infrequent Access storage class) in Dedicated Local Zones. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. This functionality is not supported for directory buckets. + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. 
WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -469,65 +755,78 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, the response includes this header. The header // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see Aborting Incomplete Multipart Uploads - // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id - // header that provides the ID of the lifecycle configuration rule that defines the - // abort action. This functionality is not supported for directory buckets. + // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Object key for which the multipart upload was initiated. Key *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. @@ -582,6 +881,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -597,6 +899,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -633,6 +947,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. 
if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go index e2d5a007d1..21db131942 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -15,64 +15,130 @@ import ( ) // Creates a session that establishes temporary security credentials to support -// fast authentication and authorization for the Zonal endpoint APIs on directory -// buckets. For more information about Zonal endpoint APIs that include the -// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) -// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory -// bucket, use the CreateSession API operation. Specifically, you grant -// s3express:CreateSession permission to a bucket in a bucket policy or an IAM -// identity-based policy. Then, you use IAM credentials to make the CreateSession -// API request on the bucket, which returns temporary security credentials that -// include the access key ID, secret access key, session token, and expiration. -// These credentials have associated permissions to access the Zonal endpoint APIs. -// After the session is created, you don’t need to use other policies to grant -// permissions to each Zonal endpoint API individually. Instead, in your Zonal -// endpoint API requests, you sign your requests by applying the temporary security -// credentials of the session to the request headers and following the SigV4 -// protocol for authentication. You also apply the session token to the -// x-amz-s3session-token request header for authorization. Temporary security -// credentials are scoped to the bucket and expire after 5 minutes. After the -// expiration time, any calls that you make with those credentials will fail. You -// must use IAM credentials again to make a CreateSession API request that -// generates a new set of temporary credentials for use. Temporary credentials -// cannot be extended or refreshed beyond the original specified interval. If you -// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// fast authentication and authorization for the Zonal endpoint API operations on +// directory buckets. For more information about Zonal endpoint API operations that +// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 +// User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission to a +// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM +// credentials to make the CreateSession API request on the bucket, which returns +// temporary security credentials that include the access key ID, secret access +// key, session token, and expiration. These credentials have associated +// permissions to access the Zonal endpoint API operations. 
After the session is +// created, you don’t need to use other policies to grant permissions to each Zonal +// endpoint API individually. Instead, in your Zonal endpoint API requests, you +// sign your requests by applying the temporary security credentials of the session +// to the request headers and following the SigV4 protocol for authentication. You +// also apply the session token to the x-amz-s3session-token request header for +// authorization. Temporary security credentials are scoped to the bucket and +// expire after 5 minutes. After the expiration time, any calls that you make with +// those credentials will fail. You must use IAM credentials again to make a +// CreateSession API request that generates a new set of temporary credentials for +// use. Temporary credentials cannot be extended or refreshed beyond the original +// specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes // automatically to avoid service interruptions when a session expires. We // recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. For more information, see Performance -// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) -// in the Amazon S3 User Guide. +// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 +// User Guide. +// // - You must make requests for this API operation to the Zonal endpoint. These // endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. -// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the CopyObject API -// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// . -// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the HeadBucket API -// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// . +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints +// in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
+// +// - CopyObject API operation - Unlike other Zonal endpoint API operations, the +// CopyObject API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// CopyObject API operation on directory buckets, see [CopyObject]. +// +// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the +// HeadBucket API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// HeadBucket API operation on directory buckets, see [HeadBucket]. // // Permissions To obtain temporary security credentials, you must create a bucket // policy or an IAM identity-based policy that grants s3express:CreateSession // permission to the bucket. In a policy, you can have the s3express:SessionMode // condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see -// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) -// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint -// APIs, the bucket policy should also grant both accounts the -// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - -// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] +// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 +// User Guide. +// +// To grant cross-account access to Zonal endpoint API operations, the bucket +// policy should also grant both accounts the s3express:CreateSession permission. +// +// If you want to encrypt objects with SSE-KMS, you must also have the +// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. +// +// Encryption For directory buckets, there are only two supported options for +// server-side encryption: server-side encryption with Amazon S3 managed keys +// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms +// ). We recommend that the bucket's default encryption uses the desired encryption +// configuration and you don't override the bucket default encryption in your +// CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the +// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. 
+// +// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low +// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must +// specify SSE-KMS as the directory bucket's default encryption configuration with +// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal +// endpoint API operations, new objects are automatically encrypted and decrypted +// with SSE-KMS and S3 Bucket Keys during the session. +// +// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] ( +// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default +// encryption configuration with a customer managed key, you can't change the +// customer managed key for the bucket's SSE-KMS configuration. +// +// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't +// override the values of the encryption settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession +// request. You don't need to explicitly specify these encryption settings values +// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings +// values from the CreateSession request to protect new objects in the directory +// bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not +// supported to override the values of the encryption settings from the +// CreateSession request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters +// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { if params == nil { params = &CreateSessionInput{} @@ -95,29 +161,111 @@ type CreateSessionInput struct { // This member is required. Bucket *string + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using KMS keys (SSE-KMS). + // + // S3 Bucket Keys are always enabled for GET and PUT operations in a directory + // bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy + // SSE-KMS encrypted objects from general purpose buckets to directory buckets, + // from directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a + // copy request is made for a KMS-encrypted object. 
+ // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops + BucketKeyEnabled *bool + + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for object encryption. The value of this header is a + // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption + // context as key-value pairs. This value is stored as object metadata and + // automatically gets passed on to Amazon Web Services KMS for future GetObject + // operations on this object. + // + // General purpose buckets - This value must be explicitly added during CopyObject + // operations if you want an additional encryption context for your object. For + // more information, see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. + // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context + SSEKMSEncryptionContext *string + + // If you specify x-amz-server-side-encryption with aws:kms , you must specify the + // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key + // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you + // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key + // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist + // in the same account that't issuing the command, you must use the full Key ARN + // not the Key ID. + // + // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket's lifetime. + // The [Amazon Web Services managed key]( aws/s3 ) isn't supported. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk + SSEKMSKeyId *string + + // The server-side encryption algorithm to use when you store objects in the + // directory bucket. + // + // For directory buckets, there are only two supported options for server-side + // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 + // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default, + // Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3 + // User Guide. + // + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html + ServerSideEncryption types.ServerSideEncryption + // Specifies the mode of the session that will be created, either ReadWrite or // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is - // capable of executing all the Zonal endpoint APIs on a directory bucket. 
A - // ReadOnly session is constrained to execute the following Zonal endpoint APIs: - // GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and - // ListMultipartUploads . + // capable of executing all the Zonal endpoint API operations on a directory + // bucket. A ReadOnly session is constrained to execute the following Zonal + // endpoint API operations: GetObject , HeadObject , ListObjectsV2 , + // GetObjectAttributes , ListParts , and ListMultipartUploads . SessionMode types.SessionMode noSmithyDocumentSerde } func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CreateSessionOutput struct { - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. // // This member is required. Credentials *types.SessionCredentials + // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS + // keys (SSE-KMS). + BucketKeyEnabled *bool + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + // This value is stored as object metadata and automatically gets passed on to + // Amazon Web Services KMS for future GetObject operations on this object. + SSEKMSEncryptionContext *string + + // If you specify x-amz-server-side-encryption with aws:kms , this header indicates + // the ID of the KMS symmetric encryption customer managed key that was used for + // object encryption. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store objects in the + // directory bucket. + ServerSideEncryption types.ServerSideEncryption + // Metadata pertaining to the operation's result. 
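To make the CreateSession flow described above concrete, here is a hedged sketch that requests a ReadOnly session on a directory bucket. The bucket name is a placeholder, a client built with s3.NewFromConfig is assumed, and in normal use the SDK issues and refreshes these sessions on your behalf for Zonal endpoint calls:

package s3example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createReadOnlySession asks for temporary credentials scoped to one
// directory bucket; per the documentation above, they expire after
// roughly five minutes and cannot be extended.
func createReadOnlySession(ctx context.Context, client *s3.Client) error {
	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
		Bucket:      aws.String("example-bucket--usw2-az1--x-s3"),
		SessionMode: types.SessionModeReadOnly,
	})
	if err != nil {
		return err
	}
	creds := out.Credentials
	fmt.Println("session token expires at:", aws.ToTime(creds.Expiration))
	return nil
}

The returned SessionCredentials carry the access key ID, secret access key, session token, and expiration used to sign and authorize subsequent Zonal endpoint requests against the bucket.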
ResultMetadata middleware.Metadata @@ -167,6 +315,9 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +333,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSessionValidationMiddleware(stack); err != nil { return err } @@ -215,6 +378,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go index 30e1381bd6..9e1edf91ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -15,33 +15,45 @@ import ( // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. +// // - Directory buckets - If multipart uploads in a directory bucket are in // progress, you can't delete the bucket until all the in-progress multipart // uploads are aborted or completed. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - You must have the s3:DeleteBucket // permission on the specified bucket in a policy. +// // - Directory bucket permissions - You must have the s3express:DeleteBucket // permission in an IAM identity-based policy instead of a bucket policy. // Cross-account access to this API operation isn't supported. This operation can // only be performed by the Amazon Web Services account that owns the resource. 
For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucket : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to DeleteBucket : +// +// [CreateBucket] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { params = &DeleteBucketInput{} @@ -59,30 +71,36 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. Directory buckets - When you use this - // operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. 
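A minimal sketch of the DeleteBucket call described above, aimed at an already emptied directory bucket (the bucket name is a placeholder and a configured *s3.Client is assumed); ExpectedBucketOwner is left unset because directory buckets reject that header with 501 Not Implemented:

package s3example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteDirectoryBucket removes a directory bucket once every object,
// version, delete marker, and in-progress multipart upload is gone.
func deleteDirectoryBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("example-bucket--usw2-az1--x-s3"),
	})
	return err
}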
Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -137,6 +155,9 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -152,6 +173,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { return err } @@ -185,6 +218,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 0033825a0a..a3a1873fa0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -13,20 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an analytics -// configuration for the bucket (specified by the analytics configuration ID). To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 analytics feature, see Amazon S3 -// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to DeleteBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to DeleteBucketAnalyticsConfiguration : +// +// [GetBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &DeleteBucketAnalyticsConfigurationInput{} @@ -63,6 +75,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { } func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -117,6 +130,9 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +148,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -165,6 +193,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go index d465826fb4..ebae81c34d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -13,14 +13,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the cors -// configuration information set for the bucket. To use this operation, you must -// have permission to perform the s3:PutBucketCORS action. The bucket owner has -// this permission by default and can grant this permission to others. For -// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. Related Resources -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// This operation is not supported for directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
+// +// # Related Resources +// +// [PutBucketCors] +// +// [RESTOPTIONSobject] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { if params == nil { params = &DeleteBucketCorsInput{} @@ -52,6 +63,7 @@ type DeleteBucketCorsInput struct { } func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -106,6 +118,9 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -121,6 +136,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -154,6 +181,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go index 7be8c47590..bf654025bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -13,20 +13,46 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the DELETE action resets the default encryption for the bucket as server-side -// encryption with Amazon S3 managed keys (SSE-S3). For information about the -// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permissions to -// perform the s3:PutEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. 
For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// This implementation of the DELETE action resets the default encryption for the +// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
+// +// The following operations are related to DeleteBucketEncryption : +// +// [PutBucketEncryption] +// +// [GetBucketEncryption] +// +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { params = &DeleteBucketEncryptionInput{} @@ -47,18 +73,34 @@ type DeleteBucketEncryptionInput struct { // The name of the bucket containing the server-side encryption configuration to // delete. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
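As a sketch of the DeleteBucketEncryption call documented above, which resets the bucket default encryption back to SSE-S3 (the bucket name is a placeholder and a configured *s3.Client is assumed), with the failure case unwrapped through the generic smithy-go APIError interface:

package s3example

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// resetBucketEncryption removes the bucket's default encryption
// configuration, reporting the service error code when rejected.
func resetBucketEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
	})
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		return fmt.Errorf("delete bucket encryption: %s: %s",
			apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
	return err
}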
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -113,6 +155,9 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -128,6 +173,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -161,6 +218,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index 734d23b043..a62386c3ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -13,25 +13,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. 
For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to DeleteBucketIntelligentTieringConfiguration include: -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// [GetBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &DeleteBucketIntelligentTieringConfigurationInput{} @@ -64,6 +77,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { } func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,6 +132,9 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -133,6 +150,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -166,6 +195,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 3b8d81a439..d24e3f754f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an inventory -// configuration (identified by the inventory ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . Operations related to DeleteBucketInventoryConfiguration include: -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// This operation is not supported for directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
+// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// [GetBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { params = &DeleteBucketInventoryConfigurationInput{} @@ -61,6 +75,7 @@ type DeleteBucketInventoryConfigurationInput struct { } func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,6 +130,9 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -130,6 +148,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -163,6 +193,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go index 88928b284a..489be24c42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -13,20 +13,61 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the lifecycle -// configuration from the specified bucket. 
Amazon S3 removes all the lifecycle -// configuration rules in the lifecycle subresource associated with the bucket. -// Your objects never expire, and Amazon S3 no longer automatically deletes any -// objects on the basis of rules contained in the deleted lifecycle configuration. -// To use this operation, you must have permission to perform the -// s3:PutLifecycleConfiguration action. By default, the bucket owner has this -// permission and the bucket owner can grant this permission to others. There is -// usually some time lag before lifecycle configuration deletion is fully -// propagated to all the Amazon S3 systems. For more information about the object -// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) -// . Related actions include: -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. +// +// Permissions +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . 
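To make the call shape concrete, the following is a minimal sketch of invoking DeleteBucketLifecycle through the regenerated v2 client, assuming default shared-config credential resolution and a placeholder bucket name (an illustrative sketch, not part of the generated SDK code):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the environment / shared config files.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Remove the lifecycle configuration from a general purpose bucket.
	_, err = client.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{
		Bucket: aws.String("example-bucket"), // assumed bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
}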
+// +// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. +// +// Related actions include: +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { params = &DeleteBucketLifecycleInput{} @@ -52,12 +93,16 @@ type DeleteBucketLifecycleInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +157,9 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +175,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { return err } @@ -160,6 +220,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. 
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go new file mode 100644 index 0000000000..880d74d94b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a metadata table configuration from a general purpose bucket. For more +// +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to remove the metadata table + // configuration from. + // + // This member is required. + Bucket *string + + // The expected bucket owner of the general purpose bucket that you want to + // remove the metadata table configuration from. 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, 
options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketMetadataTableConfiguration", + } +} + +// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member valueand a boolean indicating if the +// input has a modeled bucket name, +func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go index 21384351f5..ed96b02532 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -13,22 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes a metrics -// configuration for the Amazon CloudWatch request metrics (specified by the -// metrics configuration ID) from the bucket. Note that this doesn't include the -// daily storage metrics. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . 
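The DeleteBucketMetadataTableConfiguration operation above is new in this vendored SDK revision; a hedged sketch of calling it could look like the following, assuming a client built as in the earlier lifecycle sketch and placeholder bucket/owner values:

// deleteMetadataTableConfig removes the S3 Metadata table configuration from a
// general purpose bucket. Same package and imports as the first sketch are assumed.
func deleteMetadataTableConfig(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketMetadataTableConfiguration(ctx, &s3.DeleteBucketMetadataTableConfigurationInput{
		Bucket:              aws.String("example-bucket"), // assumed bucket name
		ExpectedBucketOwner: aws.String("111122223333"),   // assumed account ID of the bucket owner
	})
	return err
}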
For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to DeleteBucketMetricsConfiguration : -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to DeleteBucketMetricsConfiguration : +// +// [GetBucketMetricsConfiguration] +// +// [PutBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { params = &DeleteBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type DeleteBucketMetricsConfigurationInput struct { } func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,6 +134,9 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -135,6 +152,18 @@ func (c *Client) 
addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,6 +197,18 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go index 4beac6b092..4775a25161 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -13,14 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes OwnershipControls -// for an Amazon S3 bucket. To use this operation, you must have the -// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) -// . The following operations are related to DeleteBucketOwnershipControls : -// - GetBucketOwnershipControls -// - PutBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
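For DeleteBucketMetricsConfiguration, documented above, the request also carries the metrics configuration ID; a minimal sketch follows, with the same package and imports as the first example, placeholder values, and the Id field assumed from the v2 input type:

// deleteRequestMetrics deletes the CloudWatch request-metrics configuration
// identified by id from the bucket. Client construction follows the earlier sketch.
func deleteRequestMetrics(ctx context.Context, client *s3.Client, bucket, id string) error {
	_, err := client.DeleteBucketMetricsConfiguration(ctx, &s3.DeleteBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id), // the metrics configuration ID
	})
	return err
}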
+// +// The following operations are related to DeleteBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # PutBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} @@ -52,6 +60,7 @@ type DeleteBucketOwnershipControlsInput struct { } func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -106,6 +115,9 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -121,6 +133,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -154,6 +178,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go index b8e1f56a14..048abe79e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -13,44 +13,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the DeleteBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. 
If you don't have DeleteBucketPolicy permissions, Amazon S3 returns -// a 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the DeleteBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not using +// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:DeleteBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. 
For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucketPolicy -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// # The following operations are related to DeleteBucketPolicy +// +// [CreateBucket] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { params = &DeleteBucketPolicyInput{} @@ -68,30 +83,36 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -146,6 +167,9 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -161,6 +185,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -194,6 +230,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go index 9fdc6bcf32..f5c434b8d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the replication -// configuration from the bucket. To use this operation, you must have permissions -// to perform the s3:PutReplicationConfiguration action. The bucket owner has -// these permissions by default and can grant it to others. 
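A corresponding sketch for DeleteBucketPolicy, documented above, only needs the bucket name (directory buckets would be addressed through the path-style Regional endpoint as noted); same package and imports as the first example are assumed:

// deleteBucketPolicy removes the bucket policy from the named bucket.
func deleteBucketPolicy(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
		Bucket: aws.String(bucket),
	})
	return err
}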
For more information -// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . It can take a while for the deletion of a replication configuration to fully -// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// This operation is not supported for directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutReplicationConfiguration action. The bucket owner has these permissions by +// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// The following operations are related to DeleteBucketReplication : +// +// [PutBucketReplication] +// +// [GetBucketReplication] +// +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { params = &DeleteBucketReplicationInput{} @@ -42,7 +56,7 @@ func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBuck type DeleteBucketReplicationInput struct { - // The bucket name. + // The bucket name. // // This member is required. 
Bucket *string @@ -56,6 +70,7 @@ type DeleteBucketReplicationInput struct { } func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -110,6 +125,9 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -125,6 +143,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -158,6 +188,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go index ae737d40a3..ab73775a1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -13,13 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the tags from the -// bucket. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the // s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. The following operations are related to -// DeleteBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// can grant this permission to others. 
+// +// The following operations are related to DeleteBucketTagging : +// +// [GetBucketTagging] +// +// [PutBucketTagging] +// +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { params = &DeleteBucketTaggingInput{} @@ -51,6 +60,7 @@ type DeleteBucketTaggingInput struct { } func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -105,6 +115,9 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -120,6 +133,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -153,6 +178,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go index 425936dfac..6ea3b745da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -13,20 +13,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action removes the -// website configuration for a bucket. Amazon S3 returns a 200 OK response upon -// successfully deleting a website configuration on the specified bucket. You will -// get a 200 OK response if the website configuration you are trying to delete -// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket -// specified in the request does not exist. This DELETE action requires the -// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete -// the website configuration attached to a bucket. However, bucket owners can grant -// other users permission to delete the website configuration by writing a bucket -// policy granting them the S3:DeleteBucketWebsite permission. For more -// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . 
The following operations are related to DeleteBucketWebsite : -// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported for directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a bucket. +// However, bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. +// +// For more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// The following operations are related to DeleteBucketWebsite : +// +// [GetBucketWebsite] +// +// [PutBucketWebsite] +// +// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { params = &DeleteBucketWebsiteInput{} @@ -58,6 +69,7 @@ type DeleteBucketWebsiteInput struct { } func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +124,9 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +142,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -160,6 +187,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index 
c1e5ff73a9..32b3000e7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // Removes an object from a bucket. The behavior depends on the bucket's @@ -22,9 +23,7 @@ import ( // - If bucket versioning is enabled, the operation inserts a delete marker, // which becomes the current version of the object. To permanently delete an object // in a versioned bucket, you must include the object’s versionId in the request. -// For more information about versioning-enabled buckets, see Deleting object -// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html) -// . +// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. // // - If bucket versioning is suspended, the operation removes the object that // has a null versionId , if there is one, and inserts a delete marker that @@ -32,9 +31,7 @@ import ( // versionId , and all versions of the object have a versionId , Amazon S3 does // not remove the object and only inserts a delete marker. To permanently delete an // object that has a versionId , you must include the object’s versionId in the -// request. For more information about versioning-suspended buckets, see -// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html) -// . +// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is @@ -44,38 +41,45 @@ import ( // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // To remove a specific version, you must use the versionId query parameter. Using // this query parameter permanently deletes the version. If the object deleted is a // delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. +// // If the object you want to delete is in a bucket where the bucket versioning // configuration is MFA Delete enabled, you must include the x-amz-mfa request // header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. 
For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) -// in the Amazon S3 User Guide. To see sample requests that use versioning, see -// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) -// . Directory buckets - MFA delete is not supported by directory buckets. You can -// delete objects by explicitly calling DELETE Object or calling ( -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// ) to enable Amazon S3 to remove them for you. If you want to block users or -// accounts from removing or deleting objects from your bucket, you must deny them -// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User +// Guide. To see sample requests that use versioning, see [Sample Request]. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to +// enable Amazon S3 to remove them for you. If you want to block users or accounts +// from removing or deleting objects from your bucket, you must deny them the +// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must have the s3:DeleteObjectVersion -// permission. +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -83,13 +87,24 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following action is -// related to DeleteObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following action is related to DeleteObject : +// +// [PutObject] +// +// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { params = &DeleteObjectInput{} @@ -107,31 +122,40 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. 
The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name of the bucket containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -143,8 +167,9 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -152,31 +177,69 @@ type DeleteObjectInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // The If-Match header field makes the request method conditional on ETags. If the + // ETag value does not match, the operation returns a 412 Precondition Failed + // error. If the ETag matches or if the object doesn't exist, the operation will + // return a 204 Success (No Content) response . + // + // For more information about conditional requests, see [RFC 7232]. + // + // This functionality is only supported for directory buckets. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // If present, the object is deleted only if its modification times matches the + // provided Timestamp . If the Timestamp values do not match, the operation + // returns a 412 Precondition Failed error. If the Timestamp matches or if the + // object doesn’t exist, the operation returns a 204 Success (No Content) response. + // + // This functionality is only supported for directory buckets. + IfMatchLastModifiedTime *time.Time + + // If present, the object is deleted only if its size matches the provided size in + // bytes. If the Size value does not match, the operation returns a 412 + // Precondition Failed error. If the Size matches or if the object doesn’t exist, + // the operation returns a 204 Success (No Content) response. + // + // This functionality is only supported for directory buckets. + // + // You can use the If-Match , x-amz-if-match-last-modified-time and + // x-amz-if-match-size conditional headers in conjunction with each-other or + // individually. + IfMatchSize *int64 + // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. This functionality is not supported for directory buckets. + // delete enabled. + // + // This functionality is not supported for directory buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. 
VersionId *string noSmithyDocumentSerde } func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -187,16 +250,26 @@ type DeleteObjectOutput struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. This functionality is not supported for directory buckets. + // operation. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -248,6 +321,9 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -263,6 +339,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { return err } @@ -296,6 +384,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go index c5f31dec66..f62eea477c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -12,16 +12,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) 
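The DeleteObject changes above add conditional delete fields (IfMatch, IfMatchLastModifiedTime, IfMatchSize) alongside the existing Bucket/Key/VersionId members. The following is a minimal sketch, not part of this patch, of how a caller might invoke the migrated v2 client with a conditional delete; the bucket name, key, and ETag are placeholders, and the conditional headers are honored only for directory buckets per the documentation above.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials through the default v2 config chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Delete one object, but only if its ETag still matches.
	// The x-amz-if-match-* headers are ignored outside directory buckets.
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:  aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"),  // placeholder
		Key:     aws.String("path/to/object"),                       // placeholder
		IfMatch: aws.String("\"3858f62230ac3c915f300c664312c63f\""), // placeholder ETag
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deleted; delete marker=%v version=%q",
		aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))
}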
-// This operation is not supported by directory buckets. Removes the entire tag -// set from the specified object. For more information about managing object tags, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. To delete tags of a specific object version, add -// the versionId query parameter in the request. You will need permission for the -// s3:DeleteObjectVersionTagging action. The following operations are related to -// DeleteObjectTagging : -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// This operation is not supported for directory buckets. +// +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see [Object Tagging]. +// +// To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging : +// +// [PutObjectTagging] +// +// [GetObjectTagging] +// +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { params = &DeleteObjectTaggingInput{} @@ -39,23 +50,28 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. Access - // points - When you use this action with an access point, you must provide the - // alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // The bucket name containing the objects from which to remove the tags. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -77,6 +93,7 @@ type DeleteObjectTaggingInput struct { } func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -135,6 +152,9 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -150,6 +170,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -183,6 +215,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index 05f82cf756..f95db53dbd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -17,47 +17,58 @@ import ( // This operation enables you to delete multiple objects from a bucket using a // single HTTP request. 
If you know the object keys that you want to delete, then // this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. The request can contain a list of up to -// 1000 keys that you want to delete. In the XML, you provide the object key names, -// and optionally, version IDs if you want to delete a specific version of the -// object from a versioning-enabled bucket. For each key, Amazon S3 performs a -// delete operation and returns the result of that delete, success or failure, in -// the response. Note that if the object specified in the request is not found, -// Amazon S3 returns the result as deleted. +// requests, reducing per-request overhead. +// +// The request can contain a list of up to 1,000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if you +// want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. If the object +// specified in the request isn't found, Amazon S3 confirms the deletion by +// returning the result as deleted. +// // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // The operation supports two modes for the response: verbose and quiet. By // default, the operation uses verbose mode in which the response includes the // result of deletion of each key in your request. In quiet mode the response // includes only keys where the delete operation encountered an error. For a // successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. When performing this action -// on an MFA Delete enabled bucket, that attempts to delete any versioned objects, -// you must include an MFA token. If you do not provide one, the entire request -// will fail, even if there are non-versioned objects you are trying to delete. If -// you provide an invalid token, whether there are versioned keys in the request or -// not, the entire Multi-Object Delete request will fail. For information about MFA -// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) -// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by -// directory buckets. Permissions +// information about the delete in the response body. 
+// +// When performing this action on an MFA Delete enabled bucket, that attempts to +// delete any versioned objects, you must include an MFA token. If you do not +// provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether there +// are versioned keys in the request or not, the entire Multi-Object Delete request +// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always specify // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion // permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -65,26 +76,43 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Content-MD5 request header +// // - General purpose bucket - The Content-MD5 request header is required for all // Multi-Object Delete requests. Amazon S3 uses the header value to ensure that // your request body has not been altered in transit. +// // - Directory bucket - The Content-MD5 request header or a additional checksum // request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all // Multi-Object Delete requests. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to DeleteObjects : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to DeleteObjects : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { if params == nil { params = &DeleteObjectsInput{} @@ -102,31 +130,40 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,28 +175,41 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. 
To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . If you provide an individual - // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -170,31 +220,38 @@ type DeleteObjectsInput struct { // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. When performing the DeleteObjects operation on an MFA delete - // enabled bucket, which attempts to delete the specified versioned objects, you - // must include an MFA token. If you don't provide an MFA token, the entire request - // will fail, even if there are non-versioned objects that you are trying to - // delete. If you provide an invalid token, whether there are versioned object keys - // in the request or not, the entire Multi-Object Delete request will fail. For - // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // delete enabled. 
+ // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include an + // MFA token. If you don't provide an MFA token, the entire request will fail, even + // if there are non-versioned objects that you are trying to delete. If you provide + // an invalid token, whether there are versioned object keys in the request or not, + // the entire Multi-Object Delete request will fail. For information about MFA + // Delete, see [MFA Delete]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -210,7 +267,12 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
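The DeleteObjects input and output changes above preserve the v1 shape: up to 1,000 keys per request, an optional quiet mode, and per-key results in Deleted and Errors. A hedged usage sketch with the v2 client follows; the bucket and keys are placeholders, and the required Content-MD5 or additional checksum is supplied by the input-checksum middleware this file wires up, so the caller does not compute it by hand.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Batch-delete several keys in one request. Quiet mode suppresses the
	// per-key success entries; only failures are returned in out.Errors.
	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("tmp/object-one")}, // placeholder keys
				{Key: aws.String("tmp/object-two")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		log.Printf("failed to delete %s: %s", aws.ToString(e.Key), aws.ToString(e.Message))
	}
}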
@@ -262,6 +324,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -277,6 +342,21 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { return err } @@ -316,6 +396,18 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -345,9 +437,10 @@ func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { } func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go index 43969e2b19..87e72df27a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -13,17 +13,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . 
The following operations are related to DeletePublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// This operation is not supported for directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For +// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The following operations are related to DeletePublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { params = &DeletePublicAccessBlockInput{} @@ -55,6 +66,7 @@ type DeletePublicAccessBlockInput struct { } func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -109,6 +121,9 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -124,6 +139,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -157,6 +184,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); 
err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index 4bb1ff71cf..a4bddac3c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -14,26 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the accelerate subresource to return the Transfer -// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 -// Transfer Acceleration is a bucket-level feature that enables you to perform -// faster data transfers to and from Amazon S3. To use this operation, you must -// have permission to perform the s3:GetAccelerateConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an -// existing bucket to Enabled or Suspended by using the -// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// operation. A GET accelerate request does not return a state value for a bucket -// that has no transfer acceleration state. A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. For more information about -// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAccelerateConfiguration : -// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to return +// the Transfer Acceleration state of a bucket, which is either Enabled or +// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the +// s3:GetAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled or +// Suspended by using the [PutBucketAccelerateConfiguration] operation. 
+// +// A GET accelerate request does not return a state value for a bucket that has no +// transfer acceleration state. A bucket has no Transfer Acceleration state if a +// state has never been set on the bucket. +// +// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User +// Guide. +// +// The following operations are related to GetBucketAccelerateConfiguration : +// +// [PutBucketAccelerateConfiguration] +// +// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { if params == nil { params = &GetBucketAccelerateConfigurationInput{} @@ -65,16 +75,19 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,7 +95,12 @@ func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointP type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // The accelerate configuration of the bucket. 
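As the rewritten documentation above describes, GetBucketAccelerateConfiguration reports the bucket's Transfer Acceleration state, and returns no state at all if acceleration has never been configured. A short sketch with the v2 client, assuming a placeholder bucket name:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// Status is empty when Transfer Acceleration was never configured on the
	// bucket; otherwise it is either Enabled or Suspended.
	switch out.Status {
	case types.BucketAccelerateStatusEnabled:
		log.Println("transfer acceleration is enabled")
	case types.BucketAccelerateStatusSuspended:
		log.Println("transfer acceleration is suspended")
	default:
		log.Println("no transfer acceleration state set")
	}
}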
@@ -137,6 +155,9 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -152,6 +173,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -185,6 +218,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go index bc7c4ea18b..fed95ac019 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -14,26 +14,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the acl subresource to return the access control list (ACL) -// of a bucket. To use GET to return the ACL of the bucket, you must have the -// READ_ACP access to the bucket. If READ_ACP permission is granted to the -// anonymous user, you can return the ACL of the bucket without using an -// authorization header. When you use this API operation with an access point, -// provide the alias of the access point in place of the bucket name. When you use -// this API operation with an Object Lambda access point, provide the alias of the -// Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code // InvalidAccessPointAliasError is returned. 
For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAcl : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { params = &GetBucketAclInput{} @@ -51,14 +60,18 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -72,6 +85,7 @@ type GetBucketAclInput struct { } func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -133,6 +147,9 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -148,6 +165,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { return err } @@ -181,6 +210,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go index 64e41d4037..b0423c85ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -14,21 +14,33 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action returns an analytics configuration (identified by the analytics -// configuration ID) from the bucket. To use this operation, you must have -// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, -// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. 
The following operations are related to -// GetBucketAnalyticsConfiguration : -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User +// Guide. +// +// The following operations are related to GetBucketAnalyticsConfiguration : +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &GetBucketAnalyticsConfigurationInput{} @@ -65,6 +77,7 @@ type GetBucketAnalyticsConfigurationInput struct { } func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,6 +136,9 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -138,6 +154,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -171,6 +199,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go index 0997225ebc..ef6a970da4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -14,21 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the Cross-Origin -// Resource Sharing (CORS) configuration information set for the bucket. To use -// this operation, you must have permission to perform the s3:GetBucketCORS +// This operation is not supported for directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. When you use this API operation with an access point, provide the alias -// of the access point in place of the bucket name. When you use this API operation -// with an Object Lambda access point, provide the alias of the Object Lambda -// access point in place of the bucket name. If the Object Lambda access point -// alias in a request is not valid, the error code InvalidAccessPointAliasError is -// returned. For more information about InvalidAccessPointAliasError , see List of -// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// . The following operations are related to GetBucketCors : -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// others. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
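Reviewer aid, not part of the vendored patch: a hypothetical helper showing how the GetBucketCors operation above is consumed from the v2 client. The *s3.Client is assumed to be built elsewhere with s3.NewFromConfig, and the function and bucket names are illustrative assumptions.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCORSRules lists the allowed origins and methods of each CORS rule on the bucket.
func printCORSRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		// An error is also returned when the bucket has no CORS configuration.
		return err
	}
	for _, rule := range out.CORSRules {
		fmt.Printf("origins=%v methods=%v\n", rule.AllowedOrigins, rule.AllowedMethods)
	}
	return nil
}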
+// +// The following operations are related to GetBucketCors : +// +// [PutBucketCors] +// +// [DeleteBucketCors] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { if params == nil { params = &GetBucketCorsInput{} @@ -46,14 +61,18 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -67,6 +86,7 @@ type GetBucketCorsInput struct { } func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -126,6 +146,9 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -141,6 +164,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -174,6 +209,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go index 22c1f9bb51..82b7211a18 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -14,20 +14,47 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the default -// encryption configuration for an Amazon S3 bucket. By default, all buckets have a -// default encryption configuration that uses server-side encryption with Amazon S3 -// managed keys (SSE-S3). For information about the bucket default encryption -// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permission to -// perform the s3:GetEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to GetBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// Returns the default encryption configuration for an Amazon S3 bucket. 
By +// default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:GetEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to GetBucketEncryption : +// +// [PutBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { params = &GetBucketEncryptionInput{} @@ -48,18 +75,34 @@ type GetBucketEncryptionInput struct { // The name of the bucket from which the server-side encryption configuration is // retrieved. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. 
Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,6 +161,9 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -133,6 +179,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -166,6 +224,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index f3ae88a9b2..5a500f0a66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. 
+// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to GetBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
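Reviewer aid, not part of the vendored patch: a hypothetical helper for the GetBucketIntelligentTieringConfiguration operation above. Client construction, the helper name, and the configuration ID are assumptions for illustration only.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// getTieringConfig fetches one S3 Intelligent-Tiering configuration by its ID
// and prints a short summary of it.
func getTieringConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx, &s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	cfg := out.IntelligentTieringConfiguration
	fmt.Printf("config %s status=%s tierings=%d\n",
		aws.ToString(cfg.Id), cfg.Status, len(cfg.Tierings))
	return nil
}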
+// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &GetBucketIntelligentTieringConfigurationInput{} @@ -65,6 +78,7 @@ type GetBucketIntelligentTieringConfigurationInput struct { } func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,6 +137,9 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -138,6 +155,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -171,6 +200,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go index 123218e976..163c86ae73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -14,18 +14,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns an inventory -// configuration (identified by the inventory configuration ID) from the bucket. 
To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . The following operations are related to GetBucketInventoryConfiguration : -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. +// +// The following operations are related to GetBucketInventoryConfiguration : +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { params = &GetBucketInventoryConfigurationInput{} @@ -62,6 +76,7 @@ type GetBucketInventoryConfigurationInput struct { } func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,6 +135,9 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -135,6 +153,18 
@@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,6 +198,18 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go index ddcbbe0967..5803ff5b5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -14,35 +14,81 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Bucket lifecycle -// configuration now supports specifying a lifecycle rule using an object key name -// prefix, one or more object tags, object size, or any combination of these. -// Accordingly, this section describes the latest API. The previous version of the -// API supported filtering based only on an object key name prefix, which is -// supported for backward compatibility. For the related API description, see -// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// . Accordingly, this section describes the latest API. The response describes the -// new filter element that you can use to specify a filter to select a subset of -// objects to which the rule applies. If you are using a previous version of the -// lifecycle configuration, it still works. For the earlier action, Returns the -// lifecycle configuration information set on the bucket. For information about -// lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// . To use this operation, you must have permission to perform the -// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . GetBucketLifecycleConfiguration has the following special error: +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see [Object Lifecycle Management]. 
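Reviewer aid, not part of the vendored patch: a hypothetical helper for the GetBucketLifecycleConfiguration operation whose documentation begins above. It treats the NoSuchLifecycleConfiguration error described further below as "no rules configured"; the helper and bucket names are illustrative assumptions.

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// describeLifecycle prints each lifecycle rule set on the bucket.
func describeLifecycle(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			fmt.Println("bucket has no lifecycle configuration")
			return nil
		}
		return err
	}
	for _, rule := range out.Rules {
		fmt.Printf("rule %s status=%s\n", aws.ToString(rule.ID), rule.Status)
	}
	return nil
}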
+// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API, which +// is compatible with the new functionality. The previous version of the API +// supported filtering based only on an object key name prefix, which is supported +// for general purpose buckets for backward compatibility. For the related API +// description, see [GetBucketLifecycle]. +// +// Lifecyle configurations for directory buckets only support expiring objects and +// cancelling multipart uploads. Expiring of versioned objects, transitions and tag +// filters are not supported. +// +// Permissions +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:GetLifecycleConfiguration permission. +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - You must have the +// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// GetBucketLifecycleConfiguration has the following special error: +// // - Error code: NoSuchLifecycleConfiguration +// // - Description: The lifecycle configuration does not exist. 
+// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // // The following operations are related to GetBucketLifecycleConfiguration : -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// [GetBucketLifecycle] +// +// [PutBucketLifecycle] +// +// [DeleteBucketLifecycle] +// +// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { params = &GetBucketLifecycleConfigurationInput{} @@ -68,12 +114,16 @@ type GetBucketLifecycleConfigurationInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -83,6 +133,25 @@ type GetBucketLifecycleConfigurationOutput struct { // Container for a lifecycle rule. Rules []types.LifecycleRule + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It isn't supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. 
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -132,6 +201,9 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -147,6 +219,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -180,6 +264,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go index aff5f3cd55..3d1397b7f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -20,23 +20,34 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns the Region the -// bucket resides in. You set the bucket's Region using the LocationConstraint -// request parameter in a CreateBucket request. For more information, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. When you use this API operation with -// an Object Lambda access point, provide the alias of the Object Lambda access -// point in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// to return the Region that a bucket resides in. For backward compatibility, -// Amazon S3 continues to support GetBucketLocation. The following operations are -// related to GetBucketLocation : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// This operation is not supported for directory buckets. +// +// Returns the Region the bucket resides in. 
You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see [CreateBucket]. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For +// backward compatibility, Amazon S3 continues to support GetBucketLocation. +// +// The following operations are related to GetBucketLocation : +// +// [GetObject] +// +// [CreateBucket] +// +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { if params == nil { params = &GetBucketLocationInput{} @@ -54,14 +65,18 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -75,6 +90,7 @@ type GetBucketLocationInput struct { } func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,8 +98,12 @@ func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLocationOutput struct { // Specifies the Region where the bucket resides. 
For a list of all the Amazon S3 - // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // . Buckets in Region us-east-1 have a LocationConstraint of null . + // supported location constraints by Region, see [Regions and Endpoints]. + // + // Buckets in Region us-east-1 have a LocationConstraint of null . Buckets with a + // LocationConstraint of EU reside in eu-west-1 . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint types.BucketLocationConstraint // Metadata pertaining to the operation's result. @@ -135,6 +155,9 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -153,6 +176,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { return err } @@ -186,6 +221,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go index d1c4f8fbb9..c1f315fabd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -14,11 +14,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the logging -// status of a bucket and the permissions users have to view and modify that -// status. The following operations are related to GetBucketLogging : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// This operation is not supported for directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. 
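Reviewer aid, not part of the vendored patch: a hypothetical helper for the GetBucketLogging operation whose documentation begins above. The helper and bucket names are illustrative assumptions; LoggingEnabled is nil when server access logging has not been configured.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLoggingTarget reports where the bucket's server access logs are delivered.
func printLoggingTarget(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.LoggingEnabled == nil {
		fmt.Println("access logging is disabled")
		return nil
	}
	fmt.Printf("logs delivered to s3://%s/%s\n",
		aws.ToString(out.LoggingEnabled.TargetBucket),
		aws.ToString(out.LoggingEnabled.TargetPrefix))
	return nil
}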
+// +// The following operations are related to GetBucketLogging : +// +// [CreateBucket] +// +// [PutBucketLogging] +// +// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { if params == nil { params = &GetBucketLoggingInput{} @@ -50,6 +58,7 @@ type GetBucketLoggingInput struct { } func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -57,8 +66,10 @@ func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLoggingOutput struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *types.LoggingEnabled // Metadata pertaining to the operation's result. @@ -110,6 +121,9 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -125,6 +139,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -158,6 +184,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go new file mode 100644 index 0000000000..5d5e9685fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the metadata table configuration for a general purpose bucket. For +// +// more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [DeleteBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that contains the metadata table configuration that + // you want to retrieve. + // + // This member is required. + Bucket *string + + // The expected owner of the general purpose bucket that you want to retrieve the + // metadata table configuration from. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketMetadataTableConfigurationOutput struct { + + // The metadata table configuration for the general purpose bucket. + GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketMetadataTableConfiguration", + } +} + +// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member valueand a boolean indicating if the input has +// a modeled bucket name, +func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go index d7499c68cf..4fdf2ef428 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -14,21 +14,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets a metrics -// configuration (specified by the metrics configuration ID) from the bucket. Note -// that this doesn't include the daily storage metrics. To use this operation, you -// must have permissions to perform the s3:GetMetricsConfiguration action. The -// bucket owner has this permission by default. The bucket owner can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . 
The following operations are related to GetBucketMetricsConfiguration : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to GetBucketMetricsConfiguration : +// +// [PutBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { params = &GetBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type GetBucketMetricsConfigurationInput struct { } func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -124,6 +138,9 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -139,6 +156,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -172,6 +201,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go index 73155110d8..1fb7d1643c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -14,24 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the notification -// configuration of a bucket. If notifications are not enabled on the bucket, the -// action returns an empty NotificationConfiguration element. By default, you must -// be the bucket owner to read the notification configuration of a bucket. However, -// the bucket owner can use a bucket policy to grant permission to other users to -// read this configuration with the s3:GetBucketNotification permission. When you -// use this API operation with an access point, provide the alias of the access -// point in place of the bucket name. When you use this API operation with an -// Object Lambda access point, provide the alias of the Object Lambda access point -// in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following action is related to GetBucketNotification : -// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// This operation is not supported for directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant +// permission to other users to read this configuration with the +// s3:GetBucketNotification permission. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. 
+// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about setting and reading the notification configuration +// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. +// +// The following action is related to GetBucketNotification : +// +// [PutBucketNotification] +// +// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { if params == nil { params = &GetBucketNotificationConfigurationInput{} @@ -49,15 +63,18 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. When - // you use this API operation with an access point, provide the alias of the access - // point in place of the bucket name. When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -71,6 +88,7 @@ type GetBucketNotificationConfigurationInput struct { } func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -143,6 +161,9 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +179,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -191,6 +224,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go index cea1514286..cdc07a5809 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -14,14 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:GetBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// . The following operations are related to GetBucketOwnershipControls : -// - PutBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
+// +// The following operations are related to GetBucketOwnershipControls : +// +// # PutBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} @@ -53,6 +61,7 @@ type GetBucketOwnershipControlsInput struct { } func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +121,9 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +139,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -160,6 +184,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go index c2f98f9369..4a352aa89d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -13,47 +13,63 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the GetBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. 
If you don't have GetBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the GetBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:GetBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:GetBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. 
For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following action is related to GetBucketPolicy : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following action is related to GetBucketPolicy : +// +// [GetObject] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { params = &GetBucketPolicyInput{} @@ -71,39 +87,48 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name to get the bucket policy for. Directory buckets - When you use - // this operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. 
Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide Access points - When you use this API operation with - // an access point, provide the alias of the access point in place of the bucket - // name. Object Lambda access points - When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Object Lambda access points are not supported by directory buckets. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -162,6 +187,9 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -177,6 +205,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -210,6 +250,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go index cb36ac504a..db63caf6f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -14,18 +14,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the policy -// status for an Amazon S3 bucket, indicating whether the bucket is public. In -// order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For more information about when Amazon S3 considers a bucket public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to GetBucketPolicyStatus : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// This operation is not supported for directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. 
For more information about Amazon S3 +// permissions, see [Specifying Permissions in a Policy]. +// +// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. +// +// The following operations are related to GetBucketPolicyStatus : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { params = &GetBucketPolicyStatusInput{} @@ -57,6 +70,7 @@ type GetBucketPolicyStatusInput struct { } func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,6 +129,9 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -130,6 +147,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { return err } @@ -163,6 +192,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go index 7e44d38eef..ebc0d35cf9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -14,21 +14,37 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the replication -// configuration of a bucket. 
It can take a while to propagate the put or delete a -// replication configuration to all Amazon S3 systems. Therefore, a get request -// soon after put or delete can return a wrong result. For information about -// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. This action requires permissions for the -// s3:GetReplicationConfiguration action. For more information about permissions, -// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . If you include the Filter element in a replication configuration, you must -// also include the DeleteMarkerReplication and Priority elements. The response -// also returns those elements. For information about GetBucketReplication errors, -// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// This operation is not supported for directory buckets. +// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete can +// return a wrong result. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see [Using Bucket Policies and User Policies]. +// +// If you include the Filter element in a replication configuration, you must also +// include the DeleteMarkerReplication and Priority elements. The response also +// returns those elements. 
+// +// For information about GetBucketReplication errors, see [List of replication-related error codes] +// // The following operations are related to GetBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// [PutBucketReplication] +// +// [DeleteBucketReplication] +// +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { params = &GetBucketReplicationInput{} @@ -60,6 +76,7 @@ type GetBucketReplicationInput struct { } func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -119,6 +136,9 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -134,6 +154,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -167,6 +199,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go index 16cc528222..34563cb0dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -14,11 +14,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the request -// payment configuration of a bucket. To use this version of the operation, you -// must be the bucket owner. 
For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to GetBucketRequestPayment : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported for directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to GetBucketRequestPayment : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { params = &GetBucketRequestPaymentInput{} @@ -50,6 +56,7 @@ type GetBucketRequestPaymentInput struct { } func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -108,6 +115,9 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -123,6 +133,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -156,6 +178,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go index 69a6e49030..a1d8d99c41 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -14,17 +14,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag set -// associated with the bucket. To use this operation, you must have permission to -// perform the s3:GetBucketTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. GetBucketTagging has the -// following special error: +// This operation is not supported for directory buckets. 
+// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the +// s3:GetBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. +// +// GetBucketTagging has the following special error: +// // - Error code: NoSuchTagSet +// // - Description: There is no tag set associated with the bucket. // // The following operations are related to GetBucketTagging : -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [PutBucketTagging] +// +// [DeleteBucketTagging] +// +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { params = &GetBucketTaggingInput{} @@ -56,6 +67,7 @@ type GetBucketTaggingInput struct { } func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -116,6 +128,9 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -131,6 +146,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -164,6 +191,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go index 10540b136a..6e69fd4670 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -14,15 +14,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the versioning -// state of a bucket. To retrieve the versioning state of a bucket, you must be the -// bucket owner. This implementation also returns the MFA Delete status of the -// versioning state. 
If the MFA Delete status is enabled , the bucket owner must -// use an authentication device to change the versioning state of the bucket. The -// following operations are related to GetBucketVersioning : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported for directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning state. +// If the MFA Delete status is enabled , the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning : +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { if params == nil { params = &GetBucketVersioningInput{} @@ -54,6 +66,7 @@ type GetBucketVersioningInput struct { } func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -117,6 +130,9 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +148,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -165,6 +193,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go index c87f6ff1a4..ce0847c8c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -14,17 +14,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not 
supported by directory buckets. Returns the website -// configuration for a bucket. To host website on Amazon S3, you can configure a -// bucket as website by adding a website configuration. For more information about -// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Returns the website configuration for a bucket. To host website on Amazon S3, +// you can configure a bucket as website by adding a website configuration. For +// more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// This GET action requires the S3:GetBucketWebsite permission. By default, only // the bucket owner can read the bucket website configuration. However, bucket // owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. The following -// operations are related to GetBucketWebsite : -// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to GetBucketWebsite : +// +// [DeleteBucketWebsite] +// +// [PutBucketWebsite] +// +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { params = &GetBucketWebsiteInput{} @@ -56,6 +65,7 @@ type GetBucketWebsiteInput struct { } func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -124,6 +134,9 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -139,6 +152,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -172,6 +197,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); 
err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index a64f5964e1..e36ec4e3db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -16,100 +16,154 @@ import ( "time" ) -// Retrieves an object from Amazon S3. In the GetObject request, specify the full -// key name for the object. General purpose buckets - Both the virtual-hosted-style -// requests and the path-style requests are supported. For a virtual hosted-style -// request example, if you have the object photos/2006/February/sample.jpg , -// specify the object key name as /photos/2006/February/sample.jpg . For a -// path-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the -// object key name as /examplebucket/photos/2006/February/sample.jpg . For more -// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) -// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style -// requests are supported. For a virtual hosted-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named -// examplebucket--use1-az5--x-s3 , specify the object key name as +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg , specify the object key +// name as /photos/2006/February/sample.jpg . For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket , specify the object key name as +// /examplebucket/photos/2006/February/sample.jpg . For more information about +// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. +// +// Directory buckets - Only virtual-hosted-style requests are supported. For a +// virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named +// amzn-s3-demo-bucket--usw2-az1--x-s3 , specify the object key name as // /photos/2006/February/sample.jpg . Also, when you make requests to this API // operation, your requests are sent to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . +// Path-style requests are not supported. For more information about endpoints in +// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about +// endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
+// +// Permissions // - General purpose bucket permissions - You must have the required permissions // in a policy. To use GetObject , you must have the READ access to the object // (or version). If you grant READ access to the anonymous user, the GetObject // operation returns the object without using an authorization header. For more -// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If you include a versionId in your request -// header, you must have the s3:GetObjectVersion permission to access a specific -// version of an object. The s3:GetObject permission is not required in this -// scenario. If you request the current version of an object without a specific -// versionId in the request header, only the s3:GetObject permission is required. -// The s3:GetObjectVersion permission is not required in this scenario. If the -// object that you request doesn’t exist, the error that Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. +// +// If you include a versionId in your request header, you must have the +// +// s3:GetObjectVersion permission to access a specific version of an object. The +// s3:GetObject permission is not required in this scenario. +// +// If you request the current version of an object without a specific versionId in +// +// the request header, only the s3:GetObject permission is required. The +// s3:GetObjectVersion permission is not required in this scenario. +// +// If the object that you request doesn’t exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted using SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Storage classes If the object you are retrieving is stored in the S3 Glacier // Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the // S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep // Archive Access tier, before you can retrieve the object you must first restore a -// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the -// S3 Express One Zone storage class is supported to store newly created objects. -// Unsupported storage class values won't write a destination object and will -// respond with the HTTP status code 400 Bad Request . Encryption Encryption -// request headers, like x-amz-server-side-encryption , should not be sent for the -// GetObject requests, if your object uses server-side encryption with Amazon S3 -// managed encryption keys (SSE-S3), server-side encryption with Key Management -// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject -// requests for the object that uses these types of keys, you’ll get an HTTP 400 -// Bad Request error. Overriding response header values through the request There -// are times when you want to override certain response header values of a -// GetObject response. For example, you might override the Content-Disposition -// response header value through your GetObject request. You can override values -// for a set of response headers. These modified response header values are -// included only in a successful response, that is, when the HTTP status code 200 -// OK is returned. The headers you can override using the following query -// parameters in the request are a subset of the headers that Amazon S3 accepts -// when you create an object. The response headers that you can override for the -// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , -// Content-Language , Content-Type , and Expires . To override values for a set of -// response headers in the GetObject response, you can use the following query -// parameters in the request. +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. 
For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 +// Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 +// One Zone-Infrequent Access storage class) in Dedicated Local Zones. Unsupported +// storage class values won't write a destination object and will respond with the +// HTTP status code 400 Bad Request . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for the GetObject requests, if your object uses server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you +// include the header in your GetObject requests for the object that uses these +// types of keys, you’ll get an HTTP 400 Bad Request error. +// +// Directory buckets - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// Overriding response header values through the request There are times when you +// want to override certain response header values of a GetObject response. For +// example, you might override the Content-Disposition response header value +// through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the HTTP +// status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , +// Content-Type , and Expires . +// +// To override values for a set of response headers in the GetObject response, you +// can use the following query parameters in the request. +// // - response-cache-control +// // - response-content-disposition +// // - response-content-encoding +// // - response-content-language +// // - response-content-type +// // - response-expires // // When you use these parameters, you must sign the request by using either an // Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// unsigned (anonymous) request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
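
Illustrative sketch (not part of the vendored diff): a minimal caller of GetObject with the v2 SDK, using one of the response-header override parameters described above. The bucket name and object key are placeholders, and the override is applied only on a successful 200 OK response.

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()

        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // Override the Content-Disposition header of the response; the bucket
        // and key below are placeholders, not values from this patch.
        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket:                     aws.String("amzn-s3-demo-bucket"),
            Key:                        aws.String("photos/2006/February/sample.jpg"),
            ResponseContentDisposition: aws.String(`attachment; filename="sample.jpg"`),
        })
        if err != nil {
            log.Fatal(err)
        }
        defer out.Body.Close()

        // Drain the body; a real caller would write it somewhere useful.
        n, err := io.Copy(io.Discard, out.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("read bytes:", n, "etag:", aws.ToString(out.ETag))
    }
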
+// // The following operations are related to GetObject : -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [ListBuckets] +// +// [GetObjectAcl] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { if params == nil { params = &GetObjectInput{} @@ -127,35 +181,45 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this - // action with an Object Lambda access point, you must direct requests to the - // Object Lambda access point hostname. The Object Lambda access point hostname - // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. 
- // Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -174,37 +238,55 @@ type GetObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. If - // both of the If-Match and If-Unmodified-Since headers are present in the request - // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since - // condition evaluates to false ; then, S3 returns 200 OK and the data requested. - // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: If-None-Match - // condition evaluates to false , and; If-Modified-Since condition evaluates to - // true ; then, S3 returns 304 Not Modified status code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. If both - // of the If-None-Match and If-Modified-Since headers are present in the request - // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since - // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status - // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified HTTP status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. If both of the If-Match and - // If-Unmodified-Since headers are present in the request as follows: If-Match - // condition evaluates to true , and; If-Unmodified-Since condition evaluates to - // false ; then, S3 returns 200 OK and the data requested. 
For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -213,18 +295,23 @@ type GetObjectInput struct { PartNumber *int32 // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) - // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range Range *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. @@ -245,72 +332,97 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use when decrypting the object (for example, AES256 - // ). If you encrypt an object by using server-side encryption with - // customer-provided encryption keys (SSE-C) when you store the object in Amazon - // S3, then when you GET the object, you must use the following headers: + // Specifies the algorithm to use when decrypting the object (for example, AES256 ). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. 
This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // Specifies the customer-provided encryption key that you originally provided for // Amazon S3 to encrypt the data before storing it. This value is used to decrypt // the object when recovering it and must match the one used when storing the data. // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object - // by using server-side encryption with customer-provided encryption keys (SSE-C) - // when you store the object in Amazon S3, then when you GET the object, you must - // use the following headers: + // x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. If you encrypt - // an object by using server-side encryption with customer-provided encryption keys - // (SSE-C) when you store the object in Amazon S3, then when you GET the object, - // you must use the following headers: + // to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. By default, the - // GetObject operation returns the current version of an object. To return a - // different version, use the versionId subresource. + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // // - If you include a versionId in your request header, you must have the // s3:GetObjectVersion permission to access a specific version of an object. The // s3:GetObject permission is not required in this scenario. + // // - If you request the current version of an object without a specific versionId // in the request header, only the s3:GetObject permission is required. The // s3:GetObjectVersion permission is not required in this scenario. + // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. - // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) - // . + // + // For more information about versioning, see [PutBucketVersioning]. + // + // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html VersionId *string noSmithyDocumentSerde } func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -325,37 +437,55 @@ type GetObjectOutput struct { Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be + // present if the object was uploaded with the object. 
For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more + // information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in the CreateMultipartUpload request. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string @@ -378,9 +508,11 @@ type GetObjectOutput struct { // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the // response. + // // - If the specified version in the request is a delete marker, the response // returns a 405 Method Not Allowed error and the Last-Modified: timestamp // response header. @@ -390,20 +522,35 @@ type GetObjectOutput struct { // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. 
It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time - // Date and time when the object was last modified. General purpose buckets - When - // you specify a versionId of the object in your request, if the specified version - // in the request is a delete marker, the response returns a 405 Method Not Allowed - // error and the Last-Modified: timestamp response header. + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + + // Date and time when the object was last modified. + // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time // A map of metadata to store with the object in S3. @@ -415,20 +562,25 @@ type GetObjectOutput struct { // are prefixed with x-amz-meta- . This can happen if you create metadata using an // API like SOAP that supports more flexible metadata than the REST API. For // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. This functionality is not supported for directory buckets. + // headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. This - // functionality is not supported for directory buckets. + // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that's currently in place for this object. This - // functionality is not supported for directory buckets. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. This - // functionality is not supported for directory buckets. + // The date and time when this object's Object Lock will expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. 
This value is only returned if you specify @@ -436,63 +588,78 @@ type GetObjectOutput struct { PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. This functionality is not supported - // for directory buckets. + // source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. This functionality is not supported for directory buckets. - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // restored object copy. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. Directory buckets - // - Only the S3 Express One Zone storage class is supported by directory buckets - // to store objects. + // for all objects except for S3 Standard storage class objects. 
+ // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass types.StorageClass // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) - // to retrieve the tag set associated with an object. This functionality is not - // supported for directory buckets. + // permission to read object tags. + // + // You can use [GetObjectTagging] to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. + // + // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html TagCount *int32 - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -544,6 +711,9 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -556,6 +726,21 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addResponseChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectValidationMiddleware(stack); err != nil { return err } @@ -592,6 +777,18 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -620,13 +817,20 @@ func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { return string(in.ChecksumMode), true } +func setGetObjectRequestValidationModeMember(input interface{}, mode string) { + in := input.(*GetObjectInput) + in.ChecksumMode = types.ChecksumMode(mode) +} + func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { return internalChecksum.AddOutputMiddleware(stack, 
internalChecksum.OutputMiddlewareOptions{ GetValidationMode: getGetObjectRequestValidationModeMember, - ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"}, + SetValidationMode: setGetObjectRequestValidationModeMember, + ResponseChecksumValidation: options.ResponseChecksumValidation, + ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"}, IgnoreMultipartValidation: true, - LogValidationSkipped: true, - LogMultipartValidationSkipped: true, + LogValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, + LogMultipartValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, }) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index fc903cb390..85ff80f911 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -13,24 +13,39 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the access -// control list (ACL) of an object. To use this operation, you must have -// s3:GetObjectAcl permissions or READ_ACP access to the object. For more -// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on -// Outposts. By default, GET returns ACL information about the current version of -// an object. To return ACL information about a different version, use the -// versionId subresource. If your bucket uses the bucket owner enforced setting for -// S3 Object Ownership, requests to read ACLs are still supported and return the +// This operation is not supported for directory buckets. +// +// Returns the access control list (ACL) of an object. To use this operation, you +// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For +// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId +// subresource. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetObjectAcl : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. 
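
Illustrative sketch (not part of the vendored diff): reading the grants that GetObjectAcl returns, assuming an already-constructed s3.Client. The function name, bucket, and key are placeholders introduced for the example.

    package example

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // printObjectACL lists each grantee/permission pair on an object, per the
    // GetObjectAcl behavior described above. Bucket and key are placeholders
    // supplied by the caller.
    func printObjectACL(ctx context.Context, client *s3.Client, bucket, key string) error {
        out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        })
        if err != nil {
            return err
        }
        for _, g := range out.Grants {
            id := ""
            if g.Grantee != nil {
                // Canonical-user grants carry an ID; group grants carry a URI.
                id = aws.ToString(g.Grantee.ID) + aws.ToString(g.Grantee.URI)
            }
            fmt.Printf("grantee=%q permission=%s\n", id, g.Permission)
        }
        return nil
    }
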
+// +// The following operations are related to GetObjectAcl : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [DeleteObject] +// +// [PutObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { params = &GetObjectAclInput{} @@ -49,15 +64,19 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -76,20 +95,24 @@ type GetObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -100,11 +123,16 @@ type GetObjectAclOutput struct { // A list of grants. Grants []types.Grant - // Container for the bucket owner's display name and ID. + // Container for the bucket owner's display name and ID. Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -156,6 +184,9 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -171,6 +202,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { return err } @@ -204,6 +247,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index dd1b9257cd..0d438a9ed4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -14,43 +14,65 @@ import ( "time" ) -// Retrieves all the metadata from an object without returning the object itself. -// This operation is useful if you're interested only in an object's metadata. +// Retrieves all of the metadata from an object without returning the object +// itself. 
This operation is useful if you're interested only in an object's +// metadata. +// // GetObjectAttributes combines the functionality of HeadObject and ListParts . All -// of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . Directory buckets - For directory buckets, -// you must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// of the data returned with both of those individual calls can be returned with a +// single call to GetObjectAttributes . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - To use GetObjectAttributes , you must -// have READ access to the object. The permissions that you need to use this -// operation with depend on whether the bucket is versioned. If the bucket is -// versioned, you need both the s3:GetObjectVersion and -// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is -// not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If the object that you request does not exist, the -// error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found ("no such key") error. -// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden ("access denied") error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// have READ access to the object. +// +// The other permissions that you need to use this operation depend on whether the +// +// bucket is versioned and if a version ID is passed in the GetObjectAttributes +// request. +// +// - If you pass a version ID in your request, you need both the +// s3:GetObjectVersion and s3:GetObjectVersionAttributes permissions. +// +// - If you do not pass a version ID in your request, you need the s3:GetObject +// and s3:GetObjectAttributes permissions. +// +// For more information, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. +// +// If the object that you request does not exist, the error Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found ("no such key") error. +// +// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -61,49 +83,97 @@ import ( // want to specify the encryption method. If you include this header in a GET // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypted an object when you stored the object in Amazon S3 by using +// server-side encryption with customer-provided encryption keys (SSE-C), then when +// you retrieve the metadata from the object, you must use the following headers. +// These headers provide the server with the encryption key required to retrieve +// the object's metadata. 
The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't -// enabled and supported for directory buckets. For this API operation, only the -// null value of the version ID is supported by directory buckets. You can only -// specify null to the versionId query parameter in the request. Conditional -// request headers Consider the following when using request headers: +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, there are only two +// supported options for server-side encryption: server-side encryption with Amazon +// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// Versioning Directory buckets - S3 Versioning isn't enabled and supported for +// directory buckets. For this API operation, only the null value of the version +// ID is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// Conditional request headers Consider the following when using request headers: +// // - If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows, then Amazon S3 returns the HTTP status code 200 OK and the // data requested: +// // - If-Match condition evaluates to true . -// - If-Unmodified-Since condition evaluates to false . For more information -// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . +// +// - If-Unmodified-Since condition evaluates to false . +// +// For more information about conditional requests, see [RFC 7232]. +// // - If both of the If-None-Match and If-Modified-Since headers are present in // the request as follows, then Amazon S3 returns the HTTP status code 304 Not // Modified : -// - If-None-Match condition evaluates to false . -// - If-Modified-Since condition evaluates to true . For more information about -// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following actions are -// related to GetObjectAttributes : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - If-None-Match condition evaluates to false . +// +// - If-Modified-Since condition evaluates to true . +// +// For more information about conditional requests, see [RFC 7232]. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following actions are related to GetObjectAttributes : +// +// [GetObject] +// +// [GetObjectAcl] +// +// [GetObjectLegalHold] +// +// [GetObjectLockConfiguration] +// +// [GetObjectRetention] +// +// [GetObjectTagging] +// +// [HeadObject] +// +// [ListParts] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [RFC 7232]: https://tools.ietf.org/html/rfc7232 +// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html +// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { if params == nil { params = &GetObjectAttributesInput{} @@ -121,31 +191,40 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params 
*GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -166,43 +245,55 @@ type GetObjectAttributesInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // Sets the maximum number of parts to return. + // Sets the maximum number of parts to return. For more information, see [Uploading and copying objects using multipart upload in Amazon S3] in the + // Amazon Simple Storage Service user guide. + // + // [Uploading and copying objects using multipart upload in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html MaxParts *int32 // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. + // part numbers will be listed. For more information, see [Uploading and copying objects using multipart upload in Amazon S3]in the Amazon Simple + // Storage Service user guide. + // + // [Uploading and copying objects using multipart upload in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html PartNumberMarker *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. 
SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. S3 - // Versioning isn't enabled and supported for directory buckets. For this API + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this API // operation, only the null value of the version ID is supported by directory // buckets. You can only specify null to the versionId query parameter in the // request. @@ -212,6 +303,7 @@ type GetObjectAttributesInput struct { } func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -223,14 +315,18 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. + // To learn more about delete markers, see [Working with delete markers]. + // // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version // of a resource found at a URL. ETag *string - // The creation date of the object. + // Date and time when the object was last modified. LastModified *time.Time // A collection of parts associated with a multipart upload. @@ -240,18 +336,29 @@ type GetObjectAttributesOutput struct { ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. 
+ // header for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The version ID of the object. This functionality is not supported for directory - // buckets. + // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -303,6 +410,9 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -318,6 +428,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { return err } @@ -351,6 +473,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index 548f5e1cc6..497b5b3116 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets an object's current -// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectLegalHold : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets an object's current legal hold status. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// The following action is related to GetObjectLegalHold : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { params = &GetObjectLegalHoldInput{} @@ -36,15 +43,20 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct + // retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +75,12 @@ type GetObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. 
@@ -76,6 +90,7 @@ type GetObjectLegalHoldInput struct { } func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,6 +149,9 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -149,6 +167,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -182,6 +212,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go index e8e2fbd9f3..889fcca6d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,12 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the Object Lock -// configuration for a bucket. The rule specified in the Object Lock configuration -// will be applied by default to every new object placed in the specified bucket. -// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . The following action is related to GetObjectLockConfiguration : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see [Locking Objects]. 
+// +// The following action is related to GetObjectLockConfiguration : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { if params == nil { params = &GetObjectLockConfigurationInput{} @@ -36,16 +42,20 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. Access points - // - When you use this action with an access point, you must provide the alias of - // the access point in place of the bucket name or specify the access point ARN. - // When using the access point ARN, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -59,6 +69,7 @@ type GetObjectLockConfigurationInput struct { } func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -117,6 +128,9 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +146,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -165,6 +191,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index b4daabf16f..3c5698e008 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves an object's -// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectRetention : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Retrieves an object's retention settings. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { params = &GetObjectRetentionInput{} @@ -36,15 +43,20 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. 
Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct + // retrieve. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +75,12 @@ type GetObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. 
@@ -76,6 +90,7 @@ type GetObjectRetentionInput struct { } func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,6 +149,9 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -149,6 +167,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -182,6 +212,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index dc15914a0e..6535a1ab8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -13,20 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag-set of an -// object. You send the GET request against the tagging subresource associated with -// the object. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the // s3:GetObjectTagging action. By default, the GET action returns information about // current version of an object. For a versioned bucket, you can have multiple // versions of an object in your bucket. To retrieve tags of any other version, use // the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. For information about the -// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . The following actions are related to GetObjectTagging : -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// s3:GetObjectVersionTagging action. 
+// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see [Object Tagging]. +// +// The following actions are related to GetObjectTagging : +// +// [DeleteObjectTagging] +// +// [GetObjectAttributes] +// +// [PutObjectTagging] +// +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { params = &GetObjectTaggingInput{} @@ -45,22 +60,27 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. 
For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -79,10 +99,12 @@ type GetObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. @@ -92,6 +114,7 @@ type GetObjectTaggingInput struct { } func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -155,6 +178,9 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -170,6 +196,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -203,6 +241,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index 9fc83178e6..529da3eb54 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -14,14 +14,24 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns torrent files -// from a bucket. BitTorrent can save you bandwidth when you're distributing large -// files. 
You can get torrent only for objects that are less than 5 GB in size, and -// that are not encrypted using server-side encryption with a customer-provided -// encryption key. To use GET, you must have READ access to the object. This -// functionality is not supported for Amazon S3 on Outposts. The following action -// is related to GetObjectTorrent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// This operation is not supported for directory buckets. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and that +// are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent : +// +// [GetObject] +// +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { params = &GetObjectTorrentInput{} @@ -58,16 +68,19 @@ type GetObjectTorrentInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -78,7 +91,12 @@ type GetObjectTorrentOutput struct { Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
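The GetObjectTagging and GetObjectTorrent hunks above illustrate the common v2 call shape the installer code migrates to: a context, a typed input struct, and optional functional options. The following is a minimal sketch (not part of the vendored diff) of calling GetObjectTagging with the v2 client; the bucket and key names are placeholders and the default credential/region chain is assumed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()

	// Resolve credentials and region from the default chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket/key names for illustration only.
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
}

GetObjectTorrent follows the same pattern; its output carries an io.ReadCloser Body that the caller must close after reading.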
@@ -130,6 +148,9 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -142,6 +163,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { return err } @@ -175,6 +208,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go index 3689a4e163..97005c1080 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -14,22 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:GetBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock settings are different between the bucket and the account, // Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . 
The following operations are related to GetPublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. +// +// The following operations are related to GetPublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [PutPublicAccessBlock] +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { params = &GetPublicAccessBlockInput{} @@ -62,6 +78,7 @@ type GetPublicAccessBlockInput struct { } func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -121,6 +138,9 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -136,6 +156,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -169,6 +201,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go index 5f5958916a..a76f0313ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -19,42 +19,59 @@ import ( // You can use this operation to determine if a bucket exists and if you have // permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. If the bucket does not exist or you do not -// have permission to access it, the HEAD request returns a generic 400 Bad Request -// , 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these HTTP response codes. Directory -// buckets - You must make requests for this API operation to the Zonal endpoint. -// These endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory bucket - You must use IAM credentials to authenticate and authorize +// you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, the +// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found +// code. A message body is not included, so you cannot determine the exception +// beyond these HTTP response codes. +// +// Authentication and authorization General purpose buckets - Request to public +// buckets that grant the s3:ListBucket permission publicly do not need to be +// signed. All other HeadBucket requests must be authenticated and signed by using +// IAM credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source , must be +// signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions +// // - General purpose bucket permissions - To use this operation, you must have // permissions to perform the s3:ListBucket action. The bucket owner has this // permission by default and can grant this permission to others. For more -// information about permissions, see Managing access permissions to your Amazon -// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. 
+// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have the s3express:CreateSession // permission in the Action element of a policy. By default, the session is in // the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. For more -// information about example bucket policies, see Example bucket policies for S3 -// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// the s3express:SessionMode condition key to ReadOnly on the bucket. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . +// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 +// +// User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints in +// Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -72,36 +89,47 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . 
Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this API - // operation with an Object Lambda access point, provide the alias of the Object - // Lambda access point in place of the bucket name. If the Object Lambda access - // point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. 
The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -115,6 +143,7 @@ type HeadBucketInput struct { } func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -122,21 +151,25 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { type HeadBucketOutput struct { // Indicates whether the bucket name used in the request is an access point alias. - // This functionality is not supported for directory buckets. + // + // For directory buckets, the value of this field is false . AccessPointAlias *bool - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket is created. An - // example AZ ID value is usw2-az1 . This functionality is only supported by - // directory buckets. + // The name of the location where the bucket will be created. + // + // For directory buckets, the Zone ID of the Availability Zone or the Local Zone + // where the bucket is created. An example Zone ID value for an Availability Zone + // is usw2-az1 . + // + // This functionality is only supported by directory buckets. BucketLocationName *string - // The type of location where the bucket is created. This functionality is only - // supported by directory buckets. + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. BucketLocationType types.LocationType - // The Region that the bucket is located. This functionality is not supported for - // directory buckets. + // The Region that the bucket is located. BucketRegion *string // Metadata pertaining to the operation's result. 
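The HeadBucket hunks below also adjust the BucketExistsWaiter plumbing. As a minimal sketch (assuming a placeholder bucket name and the default config chain), both the direct HeadBucket call and the waiter form look like this under the v2 SDK:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket name for illustration only.
	input := &s3.HeadBucketInput{Bucket: aws.String("example-bucket")}

	// Direct call: a nil error means the bucket exists and is accessible;
	// otherwise only the generic 400/403/404 status is distinguishable.
	if _, err := client.HeadBucket(ctx, input); err != nil {
		log.Printf("HeadBucket failed: %v", err)
	}

	// Waiter form: polls HeadBucket until the bucket exists or the max wait elapses.
	waiter := s3.NewBucketExistsWaiter(client)
	if err := waiter.Wait(ctx, input, 2*time.Minute); err != nil {
		log.Fatalf("bucket never became available: %v", err)
	}
}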
@@ -188,6 +221,9 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -203,6 +239,18 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { return err } @@ -236,23 +284,21 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) -} - -var _ HeadBucketAPIClient = (*Client)(nil) - // BucketExistsWaiterOptions are waiter options for BucketExistsWaiter type BucketExistsWaiterOptions struct { @@ -285,12 +331,13 @@ type BucketExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -366,7 +413,13 @@ func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBuck } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -415,6 +468,9 @@ func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, out } } + if err != nil { + return false, err + } return true, nil } @@ -450,12 +506,13 @@ type BucketNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -532,7 +589,13 @@ func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadB } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -577,9 +640,26 @@ func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. +type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 5b7e9b6c35..a1463ca9e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -19,43 +19,53 @@ import ( // The HEAD operation retrieves metadata from an object without returning the // object itself. This operation is useful if you're interested only in an object's -// metadata. A HEAD request has the same options as a GET operation on an object. -// The response is identical to the GET response except that there is no response +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response // body. 
Because of this, if the HEAD request generates an error, it returns a // generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 // Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. Request headers -// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. +// +// Permissions +// // - General purpose bucket permissions - To use HEAD , you must have the // s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see Actions, resources, -// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) -// in the Amazon S3 User Guide. If the object you request doesn't exist, the error -// that Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 +// User Guide. For more information about the permissions to S3 API operations by +// S3 resource types, see Required permissions for Amazon S3 API operationsin the Amazon S3 User Guide. +// +// If the object you request doesn't exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. 
+// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If you enable x-amz-checksum-mode in the request and the object is encrypted +// +// with Amazon Web Services Key Management Service (Amazon Web Services KMS), you +// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM +// identity-based policies and KMS key policies for the KMS key to retrieve the +// checksum of the object. // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -66,20 +76,27 @@ import ( // want to specify the encryption method. If you include this header in a HEAD // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. 
+// +// Versioning // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -88,18 +105,40 @@ import ( // - If the specified version is a delete marker, the response returns a 405 // Method Not Allowed error and the Last-Modified: timestamp response header. // -// - Directory buckets - Delete marker is not supported by directory buckets. +// - Directory buckets - Delete marker is not supported for directory buckets. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are -// related to HeadObject : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For directory buckets, you must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in the +// format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// The following actions are related to HeadObject : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { params = &HeadObjectInput{} @@ -117,31 +156,40 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket that contains the object. 
Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -151,10 +199,19 @@ type HeadObjectInput struct { // This member is required. Key *string - // To retrieve the checksum, this parameter must be enabled. In addition, if you - // enable ChecksumMode and the object is encrypted with Amazon Web Services Key - // Management Service (Amazon Web Services KMS), you must have permission to use - // the kms:Decrypt action for the request to succeed. + // To retrieve the checksum, this parameter must be enabled. + // + // General purpose buckets - If you enable checksum mode and the object is + // uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you + // must have permission to use the kms:Decrypt action to retrieve the checksum. + // + // Directory buckets - If you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must + // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM + // identity-based policies and KMS key policies for the KMS key to retrieve the + // checksum of the object. + // + // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html ChecksumMode types.ChecksumMode // The account ID of the expected bucket owner. If the account ID that you provide @@ -163,40 +220,71 @@ type HeadObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. If both of the - // If-Match and If-Unmodified-Since headers are present in the request as follows: + // specified; otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. 
If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: + // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. If both of the - // If-None-Match and If-Modified-Since headers are present in the request as - // follows: + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. If both of the If-Match - // and If-Unmodified-Since headers are present in the request as follows: + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -214,39 +302,64 @@ type HeadObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string noSmithyDocumentSerde } func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -257,54 +370,77 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. This functionality is not supported for - // directory buckets. + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. 
Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more + // information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. 
When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string @@ -319,28 +455,45 @@ type HeadObjectOutput struct { // Size of the body in bytes. ContentLength *int64 + // The portion of the object returned in the response for a GET request. + ContentRange *string + // A standard MIME type describing the format of the object data. ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. This - // functionality is not supported for directory buckets. + // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. 
ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + // Date and time when the object was last modified. LastModified *time.Time @@ -352,26 +505,34 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. This functionality - // is not supported for directory buckets. + // can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires. 
This header is - // only returned if the requester has the s3:GetObjectRetention permission. This - // functionality is not supported for directory buckets. + // only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -379,89 +540,121 @@ type HeadObjectOutput struct { PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. In replication, you have - // a source bucket on which you configure replication and destination bucket or - // buckets where Amazon S3 stores object replicas. When you request an object ( - // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will - // return the x-amz-replication-status header in the response as follows: + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication and + // destination bucket or buckets where Amazon S3 stores object replicas. When you + // request an object ( GetObject ) or object metadata ( HeadObject ) from these + // buckets, Amazon S3 will return the x-amz-replication-status header in the + // response as follows: + // // - If requesting an object from the source bucket, Amazon S3 will return the // x-amz-replication-status header if the object in your request is eligible for - // replication. For example, suppose that in your replication configuration, you - // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with - // key prefix TaxDocs . Any objects you upload with this key name prefix, for - // example TaxDocs/document1.pdf , are eligible for replication. For any object - // request with this key name prefix, Amazon S3 will return the - // x-amz-replication-status header with value PENDING, COMPLETED or FAILED - // indicating object replication status. + // replication. + // + // For example, suppose that in your replication configuration, you specify object + // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix + // TaxDocs . Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf , are eligible for replication. For any object request + // with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // // - If requesting an object from a destination bucket, Amazon S3 will return // the x-amz-replication-status header with value REPLICA if the object in your // request is a replica that Amazon S3 created and there is no replica modification // replication in progress. + // // - When replicating objects to multiple destination buckets, the // x-amz-replication-status header acts differently. The header of the source // object will only return a value of COMPLETED when replication is successful to // all destinations. The header will remain at value PENDING until replication has // completed for all destinations. If one or more destinations fails replication // the header will return FAILED. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // . This functionality is not supported for directory buckets. + // + // For more information, see [Replication]. 
+ // + // This functionality is not supported for directory buckets. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // or an archive copy is already restored. If an archive copy is already restored, - // the header value indicates when Amazon S3 is scheduled to delete the object - // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 - // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header - // returns the value ongoing-request="true" . For more information about archiving - // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) - // . This functionality is not supported for directory buckets. Only the S3 Express - // One Zone storage class is supported by directory buckets to store objects. + // progress (see [RestoreObject]or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value + // ongoing-request="true" . + // + // For more information about archiving objects, see [Transitioning Objects: General Considerations]. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. + // + // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. 
This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by - // directory buckets to store objects. + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. 
@@ -513,6 +706,9 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -528,6 +724,18 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { return err } @@ -561,23 +769,21 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. -type HeadObjectAPIClient interface { - HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) -} - -var _ HeadObjectAPIClient = (*Client)(nil) - // ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter type ObjectExistsWaiterOptions struct { @@ -610,12 +816,13 @@ type ObjectExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -691,7 +898,13 @@ func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObje } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -740,6 +953,9 @@ func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, out } } + if err != nil { + return false, err + } return true, nil } @@ -775,12 +991,13 @@ type ObjectNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -857,7 +1074,13 @@ func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadO } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -902,9 +1125,26 @@ func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 67b7571c11..2580b44aa5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -14,27 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the analytics -// configurations for the bucket. You can have up to 1,000 analytics configurations -// per bucket. This action supports list pagination and does not return more than -// 100 configurations at a time. You should always check the IsTruncated element -// in the response. 
If there are no more configurations to list, IsTruncated is -// set to false. If there are more configurations to list, IsTruncated is set to -// true, and there will be a value in NextContinuationToken . You use the +// This operation is not supported for directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element in +// the response. If there are no more configurations to list, IsTruncated is set +// to false. If there are more configurations to list, IsTruncated is set to true, +// and there will be a value in NextContinuationToken . You use the // NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. To use -// this operation, you must have permissions to perform the +// the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to ListBucketAnalyticsConfigurations : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. 
+// +// The following operations are related to ListBucketAnalyticsConfigurations : +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { params = &ListBucketAnalyticsConfigurationsInput{} @@ -70,6 +83,7 @@ type ListBucketAnalyticsConfigurationsInput struct { } func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -142,6 +156,9 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -157,6 +174,18 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -190,6 +219,18 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index 729f878567..2a77b68048 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by 
directory buckets. Lists the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to ListBucketIntelligentTieringConfigurations include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { if params == nil { params = &ListBucketIntelligentTieringConfigurationsInput{} @@ -64,6 +77,7 @@ type ListBucketIntelligentTieringConfigurationsInput struct { } func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -136,6 +150,9 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -151,6 +168,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -184,6 +213,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go index 6c879048ca..0bd202161e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -14,26 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of -// inventory configurations for the bucket. You can have up to 1,000 analytics -// configurations per bucket. 
This action supports list pagination and does not -// return more than 100 configurations at a time. Always check the IsTruncated -// element in the response. If there are no more configurations to list, -// IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there is a value in NextContinuationToken . You -// use the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns a list of inventory configurations for the bucket. You can have up to +// 1,000 analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] +// // The following operations are related to ListBucketInventoryConfigurations : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { params = &ListBucketInventoryConfigurationsInput{} @@ -71,6 +85,7 @@ type ListBucketInventoryConfigurationsInput struct { } func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -143,6 +158,9 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +176,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -191,6 +221,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go index 0b6ca94733..7f99075af4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -13,28 +13,42 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the metrics -// configurations for the bucket. The metrics configurations are only for the -// request metrics of the bucket and do not provide information on daily storage -// metrics. You can have up to 1,000 configurations per bucket. This action -// supports list pagination and does not return more than 100 configurations at a -// time. Always check the IsTruncated element in the response. If there are no -// more configurations to list, IsTruncated is set to false. If there are more -// configurations to list, IsTruncated is set to true, and there is a value in -// NextContinuationToken . You use the NextContinuationToken value to continue the -// pagination of the list by passing the value in continuation-token in the -// request to GET the next page. To use this operation, you must have permissions -// to perform the s3:GetMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to ListBucketMetricsConfigurations : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// This operation is not supported for directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. 
The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For more information about metrics configurations and CloudWatch request +// metrics, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to ListBucketMetricsConfigurations : +// +// [PutBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { params = &ListBucketMetricsConfigurationsInput{} @@ -72,6 +86,7 @@ type ListBucketMetricsConfigurationsInput struct { } func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -145,6 +160,9 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -160,6 +178,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -193,6 +223,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go index 086d9d2900..aec9715d68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -13,11 +13,23 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of all -// buckets owned by the authenticated sender of the request. To use this operation, -// you must have the s3:ListAllMyBuckets permission. For information about Amazon -// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// . +// This operation is not supported for directory buckets. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To grant IAM permission to use this operation, you must add the +// s3:ListAllMyBuckets policy action. +// +// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. +// +// We strongly recommend using only paginated ListBuckets requests. Unpaginated +// ListBuckets requests are only supported for Amazon Web Services accounts set to +// the default general purpose bucket quota of 10,000. If you have an approved +// general purpose bucket quota above 10,000, you must send paginated ListBuckets +// requests to list your account’s buckets. All unpaginated ListBuckets requests +// will be rejected for Amazon Web Services accounts with a general purpose bucket +// quota greater than 10,000. +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { params = &ListBucketsInput{} @@ -34,6 +46,44 @@ func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optF } type ListBucketsInput struct { + + // Limits the response to buckets that are located in the specified Amazon Web + // Services Region. The Amazon Web Services Region must be expressed according to + // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) + // Region. For a list of the valid values for all of the Amazon Web Services + // Regions, see [Regions and Endpoints]. + // + // Requests made to a Regional endpoint that is different from the bucket-region + // parameter are not supported. For example, if you want to limit the response to + // your buckets in Region us-west-2 , the request must be made to an endpoint in + // Region us-west-2 . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + BucketRegion *string + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + // + // Length Constraints: Minimum length of 0. Maximum length of 1024. + // + // Required: No. + // + // If you specify the bucket-region , prefix , or continuation-token query + // parameters without using max-buckets to set the maximum number of buckets + // returned in the response, Amazon S3 applies a default page size of 10,000 and + // provides a continuation token if there are more buckets. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. 
When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxBuckets *int32 + + // Limits the response to bucket names that begin with the specified bucket name + // prefix. + Prefix *string + noSmithyDocumentSerde } @@ -42,9 +92,20 @@ type ListBucketsOutput struct { // The list of buckets owned by the requester. Buckets []types.Bucket + // ContinuationToken is included in the response when there are more buckets that + // can be listed with pagination. The next ListBuckets request to Amazon S3 can be + // continued with this ContinuationToken . ContinuationToken is obfuscated and is + // not a real bucket. + ContinuationToken *string + // The owner of the buckets listed. Owner *types.Owner + // If Prefix was sent with the request, it is included in the response. + // + // All bucket names in the response begin with the specified bucket name prefix. + Prefix *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -94,6 +155,9 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -109,6 +173,18 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { return err } @@ -139,9 +215,115 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } +// ListBucketsPaginatorOptions is the paginator options for ListBuckets +type ListBucketsPaginatorOptions struct { + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListBucketsPaginator is a paginator for ListBuckets +type ListBucketsPaginator struct { + options ListBucketsPaginatorOptions + client ListBucketsAPIClient + params *ListBucketsInput + nextToken *string + firstPage bool +} + +// NewListBucketsPaginator returns a new ListBucketsPaginator +func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator { + if params == nil { + params = &ListBucketsInput{} + } + + options := ListBucketsPaginatorOptions{} + if params.MaxBuckets != nil { + options.Limit = *params.MaxBuckets + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListBucketsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListBucketsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListBuckets page. +func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxBuckets = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListBuckets(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListBucketsAPIClient is a client that implements the ListBuckets operation. +type ListBucketsAPIClient interface { + ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error) +} + +var _ ListBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go index 3ebf78af19..d8d85e9d67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -15,23 +15,32 @@ import ( ) // Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see -// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Regional endpoint. These endpoints -// support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. 
For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions You must have the -// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy -// instead of a bucket policy. Cross-account access to this API operation isn't -// supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . +// sender of the request. For more information about directory buckets, see [Directory buckets]in the +// Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in +// an IAM identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The BucketRegion response element is not part of the ListDirectoryBuckets +// Response Syntax. +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { if params == nil { params = &ListDirectoryBucketsInput{} @@ -50,8 +59,9 @@ func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectory type ListDirectoryBucketsInput struct { // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. 
You can use this ContinuationToken for pagination of the list results.
+ // buckets in this account with a token. ContinuationToken is obfuscated and is
+ // not a real bucket name. You can use this ContinuationToken for the pagination
+ // of the list results.
 ContinuationToken *string
 
 // Maximum number of buckets to be returned in response. When the number is more
@@ -125,6 +135,9 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S
 if err = addRecordResponseTiming(stack); err != nil {
 return err
 }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
 if err = addClientUserAgent(stack, options); err != nil {
 return err
 }
@@ -140,6 +153,18 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S
 if err = addPutBucketContextMiddleware(stack); err != nil {
 return err
 }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addIsExpressUserAgent(stack); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
 if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil {
 return err
 }
@@ -170,17 +195,21 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S
 if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
 return err
 }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
 return nil
 }
 
-// ListDirectoryBucketsAPIClient is a client that implements the
-// ListDirectoryBuckets operation.
-type ListDirectoryBucketsAPIClient interface {
- ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error)
-}
-
-var _ ListDirectoryBucketsAPIClient = (*Client)(nil)
-
 // ListDirectoryBucketsPaginatorOptions is the paginator options for
 // ListDirectoryBuckets
 type ListDirectoryBucketsPaginatorOptions struct {
@@ -247,6 +276,9 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...
 }
 params.MaxDirectoryBuckets = limit
 
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
 result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...)
 if err != nil {
 return nil, err
@@ -266,6 +298,14 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...
 return result, nil
 }
 
+// ListDirectoryBucketsAPIClient is a client that implements the
+// ListDirectoryBuckets operation.
+type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index 183773651a..5915d0168f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -16,38 +16,49 @@ import ( // This operation lists in-progress multipart uploads in a bucket. An in-progress // multipart upload is a multipart upload that has been initiated by the // CreateMultipartUpload request, but has not yet been completed or aborted. +// // Directory buckets - If multipart uploads in a directory bucket are in progress, // you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. The ListMultipartUploads operation returns a maximum of -// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is -// also the default value. You can further limit the number of uploads in a -// response by specifying the max-uploads request parameter. If there are more -// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, -// the response returns an IsTruncated element with the value of true , a -// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining -// multipart uploads, you need to make subsequent ListMultipartUploads requests. -// In these requests, include two query parameters: key-marker and upload-id-marker -// . Set the value of key-marker to the NextKeyMarker value from the previous -// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker -// value from the previous response. Directory buckets - The upload-id-marker -// element and the NextUploadIdMarker element aren't supported by directory -// buckets. To list the additional multipart uploads, you only need to set the -// value of key-marker to the NextKeyMarker value from the previous response. For -// more information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// aborted or completed. To delete these in-progress multipart uploads, use the +// ListMultipartUploads operation to list the in-progress multipart uploads in the +// bucket and use the AbortMultipartUpload operation to abort all the in-progress +// multipart uploads. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. 
The limit of 1,000 multipart uploads is also the default value. +// You can further limit the number of uploads in a response by specifying the +// max-uploads request parameter. If there are more than 1,000 multipart uploads +// that satisfy your ListMultipartUploads request, the response returns an +// IsTruncated element with the value of true , a NextKeyMarker element, and a +// NextUploadIdMarker element. To list the remaining multipart uploads, you need to +// make subsequent ListMultipartUploads requests. In these requests, include two +// query parameters: key-marker and upload-id-marker . Set the value of key-marker +// to the NextKeyMarker value from the previous response. Similarly, set the value +// of upload-id-marker to the NextUploadIdMarker value from the previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -55,29 +66,49 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . 
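[Note on the ListMultipartUploads documentation above: the cleanup flow it outlines for directory buckets, listing the in-progress multipart uploads and then aborting each one, could look roughly like the sketch below with this vendored client. The bucket name is an illustrative assumption and error handling is simplified.]

// Illustrative only; not part of the vendored change.
package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    bucket := aws.String("amzn-s3-demo-bucket") // assumption: example bucket name

    // Follow NextKeyMarker/NextUploadIdMarker until the listing is no longer
    // truncated, aborting every upload that is returned.
    var keyMarker, uploadIDMarker *string
    for {
        out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
            Bucket:         bucket,
            KeyMarker:      keyMarker,
            UploadIdMarker: uploadIDMarker,
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, u := range out.Uploads {
            if _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
                Bucket:   bucket,
                Key:      u.Key,
                UploadId: u.UploadId,
            }); err != nil {
                log.Fatal(err)
            }
        }
        if !aws.ToBool(out.IsTruncated) {
            break
        }
        keyMarker = out.NextKeyMarker
        uploadIDMarker = out.NextUploadIdMarker
    }
}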
// // Sorting of multipart uploads in response +// // - General purpose bucket - In the ListMultipartUploads response, the multipart // uploads are sorted based on two criteria: +// // - Key-based sorting - Multipart uploads are initially sorted in ascending // order based on their object keys. +// // - Time-based sorting - For uploads that share the same object key, they are // further sorted in ascending order based on the upload initiation time. Among // uploads with the same key, the one that was initiated first will appear before // the ones that were initiated later. +// // - Directory bucket - In the ListMultipartUploads response, the multipart // uploads aren't sorted lexicographically based on the object keys. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListMultipartUploads : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to ListMultipartUploads : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { if params == nil { params = &ListMultipartUploadsInput{} @@ -95,50 +126,69 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . 
Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. 
+ // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string - // Character you use to group keys. All keys that contain the same string between - // the prefix, if specified, and the first occurrence of the delimiter after the - // prefix are grouped under a single result element, CommonPrefixes . If you don't - // specify the prefix parameter, then the substring starts at the beginning of the - // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. Directory buckets - For directory buckets, / - // is the only supported delimiter. + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and the + // first occurrence of the delimiter after the prefix are grouped under a single + // result element, CommonPrefixes . If you don't specify the prefix parameter, then + // the substring starts at the beginning of the key. The keys that are grouped + // under CommonPrefixes result element are not returned elsewhere in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -147,20 +197,26 @@ type ListMultipartUploadsInput struct { ExpectedBucketOwner *string // Specifies the multipart upload after which listing should begin. + // // - General purpose buckets - For general purpose buckets, key-marker is an // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. If upload-id-marker is not - // specified, only the keys lexicographically greater than the specified - // key-marker will be included in the list. If upload-id-marker is specified, any - // multipart uploads for a key equal to the key-marker might also be included, - // provided those multipart uploads have upload IDs lexicographically greater than - // the specified upload-id-marker . + // multipart upload after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to the + // key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker . + // // - Directory buckets - For directory buckets, key-marker is obfuscated and // isn't a real object key. The upload-id-marker parameter isn't supported by // directory buckets. To list the additional multipart uploads, you only need to // set the value of key-marker to the NextKeyMarker value from the previous - // response. In the ListMultipartUploads response, the multipart uploads aren't - // sorted lexicographically based on the object keys. + // response. + // + // In the ListMultipartUploads response, the multipart uploads aren't sorted + // lexicographically based on the object keys. KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the @@ -171,32 +227,38 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) Directory buckets - For directory buckets, only - // prefixes that end in a delimiter ( / ) are supported. + // use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde } func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -210,20 +272,25 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // distinct key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. Directory - // buckets - For directory buckets, / is the only supported delimiter. + // delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If you - // specify the encoding-type request parameter, Amazon S3 includes this element in - // the response, and returns encoded key name values in the following response - // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . EncodingType types.EncodingType // Indicates whether the returned list of multipart uploads is truncated. A value @@ -244,26 +311,34 @@ type ListMultipartUploadsOutput struct { NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. This - // functionality is not supported for directory buckets. + // for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. - // Directory buckets - For directory buckets, only prefixes that end in a delimiter - // ( / ) are supported. 
+ // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. A response can @@ -319,6 +394,9 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -334,6 +412,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { return err } @@ -367,6 +457,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index bcb90eb2d1..d65191283c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -13,19 +13,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns metadata about -// all versions of the objects in a bucket. You can also use request parameters as -// selection criteria to return metadata about a subset of all the object versions. +// This operation is not supported for directory buckets. 
+// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// // To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK -// response can contain valid or invalid XML. Make sure to design your application -// to parse the contents of the response and handle it appropriately. To use this -// operation, you must have READ access to the bucket. The following operations are -// related to ListObjectVersions : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// s3:ListBucketVersions action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { if params == nil { params = &ListObjectVersionsInput{} @@ -55,12 +70,20 @@ type ListObjectVersionsInput struct { // are not returned elsewhere in the response. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . 
+ // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -93,10 +116,12 @@ type ListObjectVersionsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Specifies the object version you want to start listing from. @@ -106,6 +131,7 @@ type ListObjectVersionsInput struct { } func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -117,7 +143,10 @@ type ListObjectVersionsOutput struct { // calculating the number of returns. CommonPrefixes []types.CommonPrefix - // Container for an object that is a delete marker. + // Container for an object that is a delete marker. To learn more about delete + // markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarkers []types.DeleteMarkerEntry // The delimiter grouping the included keys. A delimiter is a character that you @@ -127,10 +156,13 @@ type ListObjectVersionsOutput struct { // max-keys limitation. These keys are not returned elsewhere in the response. Delimiter *string - // Encoding type used by Amazon S3 to encode object key names in the XML response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -164,7 +196,12 @@ type ListObjectVersionsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Marks the last version of the key returned in a truncated response. @@ -222,6 +259,9 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -237,6 +277,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { return err } @@ -270,6 +322,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go index 9a3bf3e024..6c2e8afa7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -13,19 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns some or all (up -// to 1,000) of the objects in a bucket. You can use the request parameters as -// selection criteria to return a subset of the objects in a bucket. A 200 OK -// response can contain valid or invalid XML. Be sure to design your application to -// parse the contents of the response and handle it appropriately. This action has -// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// , when developing applications. For backward compatibility, Amazon S3 continues -// to support ListObjects . The following operations are related to ListObjects : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// This operation is not supported for directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. 
Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. +// +// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], +// when developing applications. For backward compatibility, Amazon S3 continues to +// support ListObjects . +// +// The following operations are related to ListObjects : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListBuckets] +// +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { if params == nil { params = &ListObjectsInput{} @@ -43,31 +59,40 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. Directory buckets - When you use - // this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket containing the objects. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -75,12 +100,20 @@ type ListObjectsInput struct { // A delimiter is a character that you use to group keys. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. 
+ // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +146,7 @@ type ListObjectsInput struct { } func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -121,14 +155,20 @@ func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { type ListObjectsOutput struct { // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by the delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix . For example, if the prefix - // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the - // common prefix is notes/summer/ . All of the keys that roll up into a common - // prefix count as a single return when calculating the number of returns. + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. @@ -141,9 +181,20 @@ type ListObjectsOutput struct { // MaxKeys value. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If using - // url , non-ASCII characters used in an object's key name will be URL encoded. For - // example, the object test_file(3).png will appear as test_file%283%29.png. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . 
+ // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -163,18 +214,24 @@ type ListObjectsOutput struct { // When the response is truncated (the IsTruncated element value in the response // is true ), you can use the key name in this field as the marker parameter in // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. This element is returned only if you have the delimiter - // request parameter specified. If the response does not include the NextMarker - // element and it is truncated, you can use the value of the last Key element in - // the response as the marker parameter in the subsequent request to get the next - // set of object keys. + // in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it is + // truncated, you can use the value of the last Key element in the response as the + // marker parameter in the subsequent request to get the next set of object keys. NextMarker *string // Keys that begin with the indicated prefix. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
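[Note on the ListObjects fields documented above: a minimal sketch, under assumptions, of Marker/NextMarker paging with a delimiter, printing CommonPrefixes as in the notes/summer/july example. The bucket name and prefix are illustrative; new code would normally prefer ListObjectsV2, as the documentation in the next file recommends.]

// Illustrative only; not part of the vendored change.
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    bucket := aws.String("amzn-s3-demo-bucket") // assumption: example bucket name
    var marker *string
    for {
        out, err := client.ListObjects(ctx, &s3.ListObjectsInput{
            Bucket:    bucket,
            Prefix:    aws.String("notes/"), // assumption: example prefix
            Delimiter: aws.String("/"),
            Marker:    marker,
        })
        if err != nil {
            log.Fatal(err)
        }
        // Grouped "subdirectories" such as notes/summer/ arrive in CommonPrefixes;
        // ungrouped keys arrive in Contents.
        for _, cp := range out.CommonPrefixes {
            fmt.Println("prefix:", aws.ToString(cp.Prefix))
        }
        for _, obj := range out.Contents {
            fmt.Println("key:", aws.ToString(obj.Key))
        }
        if !aws.ToBool(out.IsTruncated) {
            break
        }
        // NextMarker is returned only when a delimiter is specified; otherwise
        // fall back to the last key of the page, as documented above.
        if out.NextMarker != nil {
            marker = out.NextMarker
        } else if n := len(out.Contents); n > 0 {
            marker = out.Contents[n-1].Key
        } else {
            break
        }
    }
}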
@@ -226,6 +283,9 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -241,6 +301,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsValidationMiddleware(stack); err != nil { return err } @@ -274,6 +346,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go index ee09d3cbde..f5a8f5657c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -17,26 +17,34 @@ import ( // You can use the request parameters as selection criteria to return a subset of // the objects in a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see -// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the +// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets]. +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't +// return prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, ListObjectsV2 response includes +// the prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. 
These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - To use this operation, you must have // READ access to the bucket. You must have permission to perform the // s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -44,24 +52,43 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Sorting order of returned objects +// // - General purpose bucket - For general purpose buckets, ListObjectsV2 returns // objects in lexicographical order based on their key names. +// // - Directory bucket - For directory buckets, ListObjectsV2 does not return // objects in lexicographical order. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the -// latest revision of this action. We recommend that you use this revised API -// operation for application development. For backward compatibility, Amazon S3 -// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// . 
The following operations are related to ListObjectsV2 : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, [ListObjects]. +// +// The following operations are related to ListObjectsV2 : +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { params = &ListObjectsV2Input{} @@ -79,30 +106,38 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Directory buckets - When you use this operation with a directory bucket, you + // Directory buckets - When you use this operation with a directory bucket, you // must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. 
The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,18 +148,31 @@ type ListObjectsV2Input struct { ContinuationToken *string // A delimiter is a character that you use to group keys. 
- // - Directory buckets - For directory buckets, / is the only supported - // delimiter. + // + // - Directory buckets - For directory buckets, / is the only supported delimiter. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If using - // url , non-ASCII characters used in an object's key name will be URL encoded. For - // example, the object test_file(3).png will appear as test_file%283%29.png. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -134,8 +182,10 @@ type ListObjectsV2Input struct { // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner - // field to true . Directory buckets - For directory buckets, the bucket owner is - // returned as the object owner for all objects. + // field to true . + // + // Directory buckets - For directory buckets, the bucket owner is returned as the + // object owner for all objects. FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the @@ -144,29 +194,35 @@ type ListObjectsV2Input struct { MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. This functionality is not supported - // for directory buckets. + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. OptionalObjectAttributes []types.OptionalObjectAttributes - // Limits the response to keys that begin with the specified prefix. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // Limits the response to keys that begin with the specified prefix. 
+ // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. This functionality is not supported for directory buckets. + // their requests. + // + // This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. This - // functionality is not supported for directory buckets. + // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. StartAfter *string noSmithyDocumentSerde } func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -176,43 +232,57 @@ type ListObjectsV2Output struct { // All of the keys (up to 1,000) that share the same prefix are grouped together. // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. A response can contain CommonPrefixes only if - // you specify a delimiter. CommonPrefixes contains all (if there are any) keys - // between Prefix and the next occurrence of the string specified by a delimiter. + // keys is considered as one item. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by a delimiter. + // // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . For example, if the prefix is notes/ and the delimiter is - // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All - // of the keys that roll up into a common prefix count as a single return when - // calculating the number of returns. + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. + // // - Directory buckets - For directory buckets, only prefixes that end in a // delimiter ( / ) are supported. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - // You can use this ContinuationToken for pagination of the list results. 
+ // If ContinuationToken was sent with the request, it is included in the + // response. You can use the returned ContinuationToken for pagination of the list + // response. You can use this ContinuationToken for pagination of the list + // results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. Directory buckets - For directory buckets, / is the only - // supported delimiter. + // MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: Delimiter, Prefix, Key, and StartAfter . + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter . EncodingType types.EncodingType // Set to false if all of the results were returned. Set to true if more keys are @@ -239,16 +309,24 @@ type ListObjectsV2Output struct { // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. Directory buckets - For directory - // buckets, only prefixes that end in a delimiter ( / ) are supported. + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged - // If StartAfter was sent with the request, it is included in the response. This - // functionality is not supported for directory buckets. + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. 
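The Delimiter, CommonPrefixes, and ContinuationToken fields described above work together for grouped, paginated listings. A minimal sketch, assuming a client constructed elsewhere and placeholder bucket/prefix arguments: the ListObjectsV2Paginator (reorganized later in this file) handles the ContinuationToken bookkeeping, and a "/" delimiter rolls keys up into CommonPrefixes as described.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listFolders is a hypothetical helper showing delimiter-based grouping with
// the ListObjectsV2 paginator.
func listFolders(ctx context.Context, client *s3.Client, bucket, prefix string) error {
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:    aws.String(bucket),
		Prefix:    aws.String(prefix),
		Delimiter: aws.String("/"), // keys roll up into CommonPrefixes
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, cp := range page.CommonPrefixes {
			fmt.Println("prefix:", aws.ToString(cp.Prefix)) // acts like a subdirectory
		}
		for _, obj := range page.Contents {
			fmt.Println("key:", aws.ToString(obj.Key))
		}
	}
	return nil
}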
@@ -300,6 +378,9 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -315,6 +396,18 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { return err } @@ -348,23 +441,21 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) -} - -var _ ListObjectsV2APIClient = (*Client)(nil) - // ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 type ListObjectsV2PaginatorOptions struct { // Sets the maximum number of keys returned in the response. By default, the @@ -430,6 +521,9 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O } params.MaxKeys = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -452,6 +546,20 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O return result, nil } +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 3f3946d935..995cf4e535 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -14,55 +14,81 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. To use -// this operation, you must provide the upload ID in the request. 
You obtain this -// uploadID by sending the initiate multipart upload request through -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of // 1,000 parts is also the default value. You can restrict the number of parts in a // response by specifying the max-parts request parameter. If your multipart // upload consists of more than 1,000 parts, the response returns an IsTruncated // field with the value of true , and a NextPartNumberMarker element. To list // remaining uploaded parts, in subsequent ListParts requests, include the // part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. For more -// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. If the upload was created using server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must -// have permission to the kms:Decrypt action for the ListParts request to -// succeed. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// NextPartNumberMarker field value from the previous response. 
// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListParts : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If the upload was created using server-side encryption with Key Management +// +// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt +// action for the ListParts request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
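As a rough usage sketch (bucket, key, and upload ID are placeholders, and the upload is assumed to have been started elsewhere with CreateMultipartUpload), the ListPartsPaginator defined later in this file takes care of the part-number-marker / NextPartNumberMarker handling described above.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listUploadedParts is a hypothetical helper that pages through the parts of an
// in-progress multipart upload.
func listUploadedParts(ctx context.Context, client *s3.Client, bucket, key, uploadID string) error {
	p := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
		MaxParts: aws.Int32(100), // restrict the page size; the service caps pages at 1,000 parts
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, part := range page.Parts {
			fmt.Printf("part %d: %d bytes, etag %s\n",
				aws.ToInt32(part.PartNumber), aws.ToInt64(part.Size), aws.ToString(part.ETag))
		}
	}
	return nil
}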
+// +// The following operations are related to ListParts : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [GetObjectAttributes] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { params = &ListPartsInput{} @@ -80,31 +106,40 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. Directory buckets - // - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. 
The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -135,37 +170,46 @@ type ListPartsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -177,17 +221,21 @@ type ListPartsOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, then the response includes this header indicating // when the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // . The response will also include the x-amz-abort-rule-id header that will - // provide the ID of the lifecycle configuration rule that defines this action. + // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. 
+ // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + // // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -197,13 +245,22 @@ type ListPartsOutput struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Container element that identifies who initiated the multipart upload. If the // initiator is an Amazon Web Services account, this element provides the same // information as the Owner element. If the initiator is an IAM User, this element // provides the user ARN and display name. Initiator *types.Initiator - // Indicates whether the returned list of parts is truncated. A true value + // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. IsTruncated *bool @@ -221,8 +278,10 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. Directory buckets - The bucket owner is - // returned as the object owner for all the parts. + // the parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the parts. Owner *types.Owner // Specifies the part after which listing should begin. Only parts with higher @@ -234,12 +293,19 @@ type ListPartsOutput struct { Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged - // The class of storage used to store the uploaded object. Directory buckets - - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. @@ -294,6 +360,9 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -309,6 +378,18 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPartsValidationMiddleware(stack); err != nil { return err } @@ -342,23 +423,21 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. -type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) -} - -var _ ListPartsAPIClient = (*Client)(nil) - // ListPartsPaginatorOptions is the paginator options for ListParts type ListPartsPaginatorOptions struct { // Sets the maximum number of parts to return. @@ -422,6 +501,9 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio } params.MaxParts = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListParts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -444,6 +526,20 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio return result, nil } +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListPartsAPIClient is a client that implements the ListParts operation. 
+type ListPartsAPIClient interface { + ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) +} + +var _ ListPartsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go index c15d55f1f9..51de19c80c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -15,30 +15,45 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the accelerate -// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a -// bucket-level feature that enables you to perform faster data transfers to Amazon -// S3. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster data +// transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the // s3:PutAccelerateConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The Transfer Acceleration state of a bucket can be set to one of the following +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The Transfer Acceleration state of a bucket can be set to one of the following // two values: +// // - Enabled – Enables accelerated data transfers to the bucket. +// // - Suspended – Disables accelerated data transfers to the bucket. // -// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// action returns the transfer acceleration state of a bucket. After setting the -// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty -// minutes before the data transfer rates to the bucket increase. The name of the -// bucket used for Transfer Acceleration must be DNS-compliant and must not contain -// periods ("."). For more information about transfer acceleration, see Transfer -// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . The following operations are related to PutBucketAccelerateConfiguration : -// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. 
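A minimal sketch of toggling Transfer Acceleration and reading the state back with GetBucketAccelerateConfiguration, as the comment above describes; the helper name and bucket argument are placeholders, and the client is assumed to come from s3.NewFromConfig.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableTransferAcceleration is a hypothetical helper showing the Enabled /
// Suspended states described above.
func enableTransferAcceleration(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
		AccelerateConfiguration: &types.AccelerateConfiguration{
			Status: types.BucketAccelerateStatusEnabled, // or types.BucketAccelerateStatusSuspended
		},
	})
	if err != nil {
		return err
	}
	// Read the state back; increased transfer rates can take up to thirty minutes to apply.
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	fmt.Println("accelerate status:", out.Status)
	return nil
}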
+// +// After setting the Transfer Acceleration state of a bucket to Enabled, it might +// take up to thirty minutes before the data transfer rates to the bucket increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant and +// must not contain periods ("."). +// +// For more information about transfer acceleration, see [Transfer Acceleration]. +// +// The following operations are related to PutBucketAccelerateConfiguration : +// +// [GetBucketAccelerateConfiguration] +// +// [CreateBucket] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { params = &PutBucketAccelerateConfigurationInput{} @@ -66,14 +81,17 @@ type PutBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -85,6 +103,7 @@ type PutBucketAccelerateConfigurationInput struct { } func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -139,6 +158,9 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -154,6 +176,21 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -190,6 +227,18 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -219,9 +268,10 @@ func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{} } func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go index f88bb4af22..a7d37a86b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -15,89 +15,159 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the permissions on -// an existing bucket using access control lists (ACL). For more information, see -// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can -// use one of the following two ways to set a bucket's permissions: +// This operation is not supported for directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the +// WRITE_ACP permission. 
+// +// You can use one of the following two ways to set a bucket's permissions: +// // - Specify the ACL in the request body +// // - Specify permissions using request headers // // You cannot specify access permission using both the body and the request -// headers. Depending on your application needs, you may choose to set the ACL on a -// bucket using either the request body or the headers. For example, if you have an +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an // existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions by using -// one of the following methods: +// can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions by using one of the following +// methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-acl . If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use the // x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see -// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . 
You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-write header grants create, overwrite, and delete objects -// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-write: -// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", -// id="555566667777" +// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-write header grants create, overwrite, +// +// and delete objects permission to LogDelivery group predefined by Amazon S3 and +// two Amazon Web Services accounts identified by their email addresses. +// +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// +// id="111122223333", id="555566667777" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the -// CanonicalUser and, in a response to a GET Object acl request, appears as the -// CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. 
California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>& +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. // // The following operations are related to PutBucketAcl : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetObjectAcl] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { if params == nil { params = &PutBucketAclInput{} @@ -126,21 +196,27 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. 
When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -158,9 +234,10 @@ type PutBucketAclInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string // Allows grantee to write the ACL for the applicable bucket. 
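For reference alongside this hunk, a minimal, hypothetical usage sketch of the operation documented above. It assumes default credential loading, a placeholder bucket name, and the standard v2 client packages shown in the imports; none of it comes from the patch itself.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default chain (env, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Apply a canned ACL. "example-bucket" is a placeholder; the caller must
	// own the bucket and hold WRITE_ACP on it, as the doc comment above notes.
	_, err = client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"),
		ACL:    types.BucketCannedACLPrivate,
	})
	if err != nil {
		log.Fatalf("PutBucketAcl: %v", err)
	}
}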
@@ -170,6 +247,7 @@ type PutBucketAclInput struct { } func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -224,6 +302,9 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -239,6 +320,21 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { return err } @@ -278,6 +374,18 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -307,9 +415,10 @@ func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 0604fb930a..8e13fbcdae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -14,45 +14,67 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets an analytics -// configuration for the bucket (specified by the analytics configuration ID). You -// can have up to 1,000 analytics configurations per bucket. You can choose to have -// storage class analysis export analysis reports sent to a comma-separated values -// (CSV) flat file. See the DataExport request element. Reports are updated daily -// and are based on the object filters that you configure. When selecting data -// export, you specify a destination bucket and an optional destination prefix -// where the file is written. You can export the data to a destination bucket in a -// different account. 
However, the destination bucket must be in the same Region as -// the bucket that you are making the PUT analytics configuration to. For more -// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . You must create a bucket policy on the destination bucket where the exported +// This operation is not supported for directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per bucket. +// +// You can choose to have storage class analysis export analysis reports sent to a +// comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you +// configure. When selecting data export, you specify a destination bucket and an +// optional destination prefix where the file is written. You can export the data +// to a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// You must create a bucket policy on the destination bucket where the exported // file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . To use this operation, you must have permissions to perform the +// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketAnalyticsConfiguration has the following special errors: +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketAnalyticsConfiguration has the following special errors: +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: InvalidArgument +// // - Cause: Invalid argument. +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: TooManyConfigurations +// // - Cause: You are attempting to create a new configuration but have already // reached the 1,000-configuration limit. +// // - HTTP Error: HTTP 403 Forbidden +// // - Code: AccessDenied +// // - Cause: You are not the owner of the specified bucket, or you do not have // the s3:PutAnalyticsConfiguration bucket permission to set the configuration on // the bucket. 
// // The following operations are related to PutBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &PutBucketAnalyticsConfigurationInput{} @@ -94,6 +116,7 @@ type PutBucketAnalyticsConfigurationInput struct { } func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -148,6 +171,9 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -163,6 +189,18 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -196,6 +234,18 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff 
--git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go index 3e6604ef6a..653a1598a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -15,35 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the cors -// configuration for your bucket. If the configuration exists, Amazon S3 replaces -// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// This operation is not supported for directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. You set this configuration on a bucket so that the bucket can service +// others. +// +// You set this configuration on a bucket so that the bucket can service // cross-origin requests. For example, you might want to enable a request whose // origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. To -// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// my.example.bucket.com by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors // subresource to the bucket. The cors subresource is an XML document in which you // configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. When Amazon S3 -// receives a cross-origin request (or a pre-flight OPTIONS request) against a -// bucket, it evaluates the cors configuration on the bucket and uses the first -// CORSRule rule that matches the incoming browser request to enable a cross-origin -// request. For a rule to match, the following conditions must be met: +// on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS +// request) against a bucket, it evaluates the cors configuration on the bucket +// and uses the first CORSRule rule that matches the incoming browser request to +// enable a cross-origin request. For a rule to match, the following conditions +// must be met: +// // - The request's Origin header must match AllowedOrigin elements. +// // - The request method (for example, GET, PUT, HEAD, and so on) or the // Access-Control-Request-Method header in case of a pre-flight OPTIONS request // must be one of the AllowedMethod elements. +// // - Every header specified in the Access-Control-Request-Headers request header // of a pre-flight request must match an AllowedHeader element. // -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. 
The following operations are related to -// PutBucketCors : -// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketCors : +// +// [GetBucketCors] +// +// [DeleteBucketCors] +// +// [RESTOPTIONSobject] +// +// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { if params == nil { params = &PutBucketCorsInput{} @@ -67,27 +86,34 @@ type PutBucketCorsInput struct { Bucket *string // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) - // in the Amazon S3 User Guide. + // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + // + // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html // // This member is required. CORSConfiguration *types.CORSConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] 
+ // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +125,7 @@ type PutBucketCorsInput struct { } func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -153,6 +180,9 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +198,21 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -207,6 +252,18 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +293,10 @@ func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go index dfc71dc5c6..87d25702cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -15,30 +15,116 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action uses the -// encryption subresource to configure default encryption and Amazon S3 Bucket Keys -// for an existing bucket. By default, all buckets have a default encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). 
You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does -// not validate the KMS key ID provided in PutBucketEncryption requests. This -// action requires Amazon Web Services Signature Version 4. For more information, -// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// . To use this operation, you must have permission to perform the -// s3:PutEncryptionConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketEncryption : -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// This operation configures default encryption and Amazon S3 Bucket Keys for an +// existing bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets +// +// - You can optionally configure default encryption for a bucket by using +// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or +// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). +// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. +// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the +// Amazon S3 User Guide. +// +// - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify +// that your KMS key ID is correct. Amazon S3 doesn't validate the KMS key ID +// provided in PutBucketEncryption requests. 
+// +// - Directory buckets - You can optionally configure default encryption for a +// bucket by using server-side encryption with Key Management Service (KMS) keys +// (SSE-KMS). +// +// - We recommend that the bucket's default encryption uses the desired +// encryption configuration and you don't override the bucket default encryption in +// your CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] +// . +// +// - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket's +// lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory +// bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy +// SSE-KMS encrypted objects from general purpose buckets to directory buckets, +// from directory buckets to general purpose buckets, or between directory buckets, +// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a +// copy request is made for a KMS-encrypted object. +// +// - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the +// key ID or key ARN. The key alias format of the KMS key isn't supported. +// +// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to +// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption +// requests. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. +// +// Also, this action requires Amazon Web Services Signature Version 4. For more +// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// To set a directory bucket default encryption with SSE-KMS, you must also have +// +// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. 
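As a point of reference for the SSE-KMS guidance above, a minimal, hypothetical sketch of setting a default-encryption rule with the v2 client. The bucket name and KMS key ARN are placeholders, and the *s3.Client is assumed to be constructed elsewhere (for example as in the earlier sketch); this is not part of the vendored code.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putDefaultKMSEncryption is a hypothetical helper; error handling is left to
// the caller.
func putDefaultKMSEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				// A fully qualified key ARN is recommended above so the key
				// resolves in the bucket owner's account, not the requester's.
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
				},
			}},
		},
	})
	return err
}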
+// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to PutBucketEncryption : +// +// [GetBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job +// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { if params == nil { params = &PutBucketEncryptionInput{} @@ -57,13 +143,18 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry type PutBucketEncryptionInput struct { // Specifies default encryption for a bucket using server-side encryption with - // different key options. By default, all buckets have a default encryption - // configuration that uses server-side encryption with Amazon S3 managed keys - // (SSE-S3). You can optionally configure default encryption for a bucket by using - // server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a - // customer-provided key (SSE-C). 
For information about the bucket default - // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon S3 User Guide. + // different key options. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string @@ -73,31 +164,45 @@ type PutBucketEncryptionInput struct { // This member is required. ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the server-side encryption - // configuration. For requests made using the Amazon Web Services Command Line - // Interface (CLI) or Amazon Web Services SDKs, this field is calculated - // automatically. + // The Base64 encoded 128-bit MD5 digest of the server-side encryption + // configuration. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). 
+ // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -152,6 +257,9 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +275,21 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -206,6 +329,18 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -235,9 +370,10 @@ func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index 61d73da569..c032b4039c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -14,37 +14,58 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Puts a S3 -// Intelligent-Tiering configuration to the specified bucket. You can have up to -// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. 
You can +// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to PutBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] // // You only need S3 Intelligent-Tiering enabled on a bucket if you want to // automatically move objects stored in the S3 Intelligent-Tiering storage class to // the Archive Access or Deep Archive Access tier. -// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP -// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad -// Request Error Code: TooManyConfigurations Cause: You are attempting to create a -// new configuration but have already reached the 1,000-configuration limit. HTTP -// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you -// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set -// the configuration on the bucket. 
+// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission +// to set the configuration on the bucket. +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &PutBucketIntelligentTieringConfigurationInput{} @@ -82,6 +103,7 @@ type PutBucketIntelligentTieringConfigurationInput struct { } func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -136,6 +158,9 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -151,6 +176,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -184,6 +221,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go index 03d79a0d8d..773ba76e55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -14,48 +14,76 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the PUT action adds an inventory configuration (identified by the inventory ID) -// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// This operation is not supported for directory buckets. +// +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// // Amazon S3 inventory generates inventories of the objects in the bucket on a // daily or weekly basis, and the results are published to a flat file. The bucket // that is inventoried is called the source bucket, and the bucket where the // inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. When -// you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate the -// inventory daily or weekly. You can also configure what object metadata to -// include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon S3 User Guide. You must create a bucket policy on the destination -// bucket to grant permissions to Amazon S3 to write objects to the bucket in the -// defined location. For an example policy, see Granting Permissions for Amazon S3 -// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . Permissions To use this operation, you must have permission to perform the +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the +// destination bucket where you want the inventory to be stored, and whether to +// generate the inventory daily or weekly. You can also configure what object +// metadata to include and whether to inventory all object versions or only current +// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For an +// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// Permissions To use this operation, you must have permission to perform the // s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. The -// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) -// report that includes all object metadata fields available and to specify the +// default and can grant this permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report +// that includes all object metadata fields available and to specify the // destination bucket to store the inventory. 
A user with read access to objects in // the destination bucket can also access all object metadata fields that are -// available in the inventory report. To restrict access to an inventory report, -// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) -// in the Amazon S3 User Guide. For more information about the metadata fields -// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) -// in the Amazon S3 User Guide. For more information about permissions, see -// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following -// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid -// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. The following -// operations are related to PutBucketInventoryConfiguration : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// available in the inventory report. +// +// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. +// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] +// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutInventoryConfiguration bucket permission to set +// the configuration on the bucket. 
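The inventory doc comment above notes that the destination bucket needs a policy granting Amazon S3 permission to write inventory objects. A hedged sketch of that grant, expressed through PutBucketPolicy, follows; it is not taken from this patch, the statement only mirrors the documented example policy, and the helper name, bucket names, and account ID are placeholders.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// grantInventoryDelivery attaches a policy to a hypothetical destination
// bucket so Amazon S3 can deliver inventory reports into it, as described in
// the comment above.
func grantInventoryDelivery(ctx context.Context, client *s3.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "InventoryDestinationPolicy",
	    "Effect": "Allow",
	    "Principal": {"Service": "s3.amazonaws.com"},
	    "Action": "s3:PutObject",
	    "Resource": "arn:aws:s3:::amzn-s3-demo-destination-bucket/*",
	    "Condition": {
	      "StringEquals": {
	        "aws:SourceAccount": "111122223333",
	        "s3:x-amz-acl": "bucket-owner-full-control"
	      }
	    }
	  }]
	}`

	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("amzn-s3-demo-destination-bucket"),
		Policy: aws.String(policy),
	})
	return err
}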
+// +// The following operations are related to PutBucketInventoryConfiguration : +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html +// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 +// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { params = &PutBucketInventoryConfigurationInput{} @@ -97,6 +125,7 @@ type PutBucketInventoryConfigurationInput struct { } func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -151,6 +180,9 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -166,6 +198,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -199,6 +243,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go index 88096fdd13..567b807733 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -15,26 +15,43 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a new lifecycle -// configuration for the bucket or replaces an existing lifecycle configuration. -// Keep in mind that this will overwrite an existing lifecycle configuration, so if -// you want to retain any configuration details, they must be included in the new -// lifecycle configuration. For information about lifecycle configuration, see -// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// . Bucket lifecycle configuration now supports specifying a lifecycle rule using +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information about +// lifecycle configuration, see [Managing your storage lifecycle]. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, object size, or any // combination of these. Accordingly, this section describes the latest API. The // previous version of the API supported filtering based only on an object key name // prefix, which is supported for backward compatibility. For the related API -// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// . Rules You specify the lifecycle configuration in your request body. The -// lifecycle configuration is specified as XML consisting of one or more rules. An -// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not -// adjustable. Each rule consists of the following: +// description, see [PutBucketLifecycle]. +// +// Rules Permissions HTTP Host header syntax You specify the lifecycle +// configuration in your request body. The lifecycle configuration is specified as +// XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can +// have up to 1,000 rules. This limit is not adjustable. +// +// Bucket lifecycle configuration supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility for general purpose buckets. For +// the related API description, see [PutBucketLifecycle]. +// +// Lifecyle configurations for directory buckets only support expiring objects and +// cancelling multipart uploads. Expiring of versioned objects,transitions and tag +// filters are not supported. +// +// A lifecycle rule consists of the following: +// // - A filter identifying a subset of objects to which the rule applies. 
The // filter can be based on a key name prefix, object tags, object size, or any // combination of these. +// // - A status indicating whether the rule is in effect. +// // - One or more lifecycle transition and expiration actions that you want // Amazon S3 to perform on the objects identified by the filter. If the state of // your bucket is versioning-enabled or versioning-suspended, you can have many @@ -42,28 +59,69 @@ import ( // versions). Amazon S3 provides predefined actions that you can specify for // current and noncurrent object versions. // -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) -// . Permissions By default, all Amazon S3 resources are private, including -// buckets, objects, and related subresources (for example, lifecycle configuration -// and website configuration). Only the resource owner (that is, the Amazon Web -// Services account that created it) can access the resource. The resource owner -// can optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration -// permission. You can also explicitly deny permissions. An explicit deny also -// supersedes any other permissions. If you want to block users or accounts from -// removing or deleting objects from your bucket, you must deny them permissions -// for the following actions: -// - s3:DeleteObject -// - s3:DeleteObjectVersion -// - s3:PutLifecycleConfiguration +// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. // -// For more information about permissions, see Managing Access Permissions to Your -// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to PutBucketLifecycleConfiguration : -// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes any +// +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// - s3:DeleteObject +// +// - s3:DeleteObjectVersion +// +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The following operations are related to PutBucketLifecycleConfiguration : +// +// [GetBucketLifecycleConfiguration] +// +// [DeleteBucketLifecycle] +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { params = &PutBucketLifecycleConfigurationInput{} @@ -86,33 +144,79 @@ type PutBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. 
When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *types.BucketLifecycleConfiguration + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. + TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + noSmithyDocumentSerde } func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketLifecycleConfigurationOutput struct { + + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. 
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -162,6 +266,9 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -177,6 +284,21 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -216,6 +338,18 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -245,9 +379,10 @@ func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) } func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go index fb80d2ee1b..53342aca48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -15,39 +15,68 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Set the logging -// parameters for a bucket and to specify permissions for who can view and modify -// the logging parameters. All logs are saved to buckets in the same Amazon Web -// Services Region as the source bucket. To set the logging status of a bucket, you -// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL -// to all logs. You use the Grantee request element to grant access to other -// people. The Permissions request element specifies the kind of access the -// grantee has to the logs. If the target bucket for log delivery uses the bucket -// owner enforced setting for S3 Object Ownership, you can't use the Grantee -// request element to grant access to others. 
Permissions can only be granted using -// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) -// to whom you're assigning access rights (by using request elements) in the -// following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By Email address: <>Grantees@email.com<> The grantee is resolved to the -// CanonicalUser and, in a response to a GETObjectAcl request, appears as the -// CanonicalUser. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// This operation is not supported for directory buckets. +// +// Set the logging parameters for a bucket and to specify permissions for who can +// view and modify the logging parameters. All logs are saved to buckets in the +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the +// Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// If the target bucket for log delivery uses the bucket owner enforced setting +// for S3 Object Ownership, you can't use the Grantee request element to grant +// access to others. Permissions can only be granted using policies. For more +// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (by using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. +// +// - By Email address: +// +// <>Grantees@email.com<> +// +// The grantee is resolved to the CanonicalUser and, in a response to a +// +// GETObjectAcl request, appears as the CanonicalUser. +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: For -// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) -// in the Amazon S3 User Guide. For more information about creating a bucket, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . For more information about returning the logging status of a bucket, see -// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) -// . 
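Since the logging doc comment here walks through LoggingEnabled and the empty BucketLoggingStatus element, a small illustrative call is sketched below; it is not part of the vendored change, the bucket names, prefix, and helper name are assumptions, and TargetGrants are omitted because the guidance above says grants can't be used when the target bucket enforces bucket-owner ownership.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableAccessLogging turns on server access logging for a hypothetical source
// bucket, delivering logs under the given prefix in a target bucket in the
// same Region. Sending an empty BucketLoggingStatus instead would disable
// logging, per the comment above.
func enableAccessLogging(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("amzn-s3-demo-source-bucket"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("amzn-s3-demo-logs-bucket"),
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	return err
}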
The following operations are related to PutBucketLogging : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// disable logging, you use an empty BucketLoggingStatus request element: +// +// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User +// Guide. +// +// For more information about creating a bucket, see [CreateBucket]. For more information about +// returning the logging status of a bucket, see [GetBucketLogging]. +// +// The following operations are related to PutBucketLogging : +// +// [PutObject] +// +// [DeleteBucket] +// +// [CreateBucket] +// +// [GetBucketLogging] +// +// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { if params == nil { params = &PutBucketLoggingInput{} @@ -75,19 +104,23 @@ type PutBucketLoggingInput struct { // This member is required. BucketLoggingStatus *types.BucketLoggingStatus - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutBucketLogging request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the PutBucketLogging request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +132,7 @@ type PutBucketLoggingInput struct { } func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -153,6 +187,9 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +205,21 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -207,6 +259,18 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +300,10 @@ func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go index bff1452b97..e386b1e9af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -14,29 +14,44 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets a metrics -// configuration (specified by the metrics configuration ID) for the bucket. You -// can have up to 1,000 metrics configurations per bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. 
To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to PutBucketMetricsConfiguration : -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// This operation is not supported for directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) for +// the bucket. You can have up to 1,000 metrics configurations per bucket. If +// you're updating an existing metrics configuration, note that this is a full +// replacement of the existing metrics configuration. If you don't include the +// elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to PutBucketMetricsConfiguration : +// +// [DeleteBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] // // PutBucketMetricsConfiguration has the following special error: +// // - Error code: TooManyConfigurations +// // - Description: You are attempting to create a new configuration but have // already reached the 1,000-configuration limit. 
+// // - HTTP Status Code: HTTP 400 Bad Request +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} @@ -79,6 +94,7 @@ type PutBucketMetricsConfigurationInput struct { } func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -133,6 +149,9 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -148,6 +167,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -181,6 +212,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go index e937b5c594..9100fb1976 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -14,41 +14,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Enables notifications of -// specified events for a bucket. 
For more information about event notifications, -// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . Using this API, you can replace an existing notification configuration. The +// This operation is not supported for directory buckets. +// +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see [Configuring Event Notifications]. +// +// Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. By default, your -// bucket has no event notifications configured. That is, the notification -// configuration will be an empty NotificationConfiguration . This action -// replaces the existing notification configuration with the configuration you -// include in the request body. After Amazon S3 receives this request, it first -// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon -// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner -// has permission to publish to it by sending a test notification. In the case of -// Lambda destinations, Amazon S3 verifies that the Lambda function permissions -// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For -// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . You can disable notifications by adding the empty NotificationConfiguration -// element. For more information about the number of event notification -// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) -// in Amazon Web Services General Reference. By default, only the bucket owner can -// configure notifications on a bucket. However, bucket owners can use a bucket -// policy to grant permission to other users to set this configuration with the -// required s3:PutBucketNotification permission. The PUT notification is an atomic -// operation. For example, suppose your notification configuration includes SNS -// topic, SQS queue, and Lambda function configurations. When you send a PUT -// request with this configuration, Amazon S3 sends test messages to your SNS -// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will -// not add the configuration to your bucket. If the configuration in the request -// body includes only one TopicConfiguration specifying only the -// s3:ReducedRedundancyLostObject event type, the response will also include the -// x-amz-sns-test-message-id header containing the message ID of the test -// notification sent to the topic. The following action is related to -// PutBucketNotificationConfiguration : -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration . +// +// This action replaces the existing notification configuration with the +// configuration you include in the request body. 
+// +// After Amazon S3 receives this request, it first verifies that any Amazon Simple +// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) +// destination exists, and that the bucket owner has permission to publish to it by +// sending a test notification. In the case of Lambda destinations, Amazon S3 +// verifies that the Lambda function permissions grant Amazon S3 permission to +// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// For more information about the number of event notification configurations that +// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with the required s3:PutBucketNotification +// permission. +// +// The PUT notification is an atomic operation. For example, suppose your +// notification configuration includes SNS topic, SQS queue, and Lambda function +// configurations. When you send a PUT request with this configuration, Amazon S3 +// sends test messages to your SNS topic. If the message fails, the entire PUT +// action will fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. 
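To make the replace-the-whole-configuration and test-message behaviour described above concrete, here is a minimal sketch (not from this patch) that points a bucket's notifications at a single SQS queue. The helper name, bucket name, and queue ARN are hypothetical, and the event type is passed as a string literal to keep that assumption explicit.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// publishCreatedEventsToQueue replaces the bucket's notification configuration
// with one SQS destination for object-created events. Amazon S3 sends a test
// message to the queue and rejects the whole request if that delivery fails,
// as described in the comment above.
func publishCreatedEventsToQueue(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx,
		&s3.PutBucketNotificationConfigurationInput{
			Bucket: aws.String("amzn-s3-demo-bucket"),
			NotificationConfiguration: &types.NotificationConfiguration{
				QueueConfigurations: []types.QueueConfiguration{{
					QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:s3-events"),
					Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
				}},
			},
		})
	return err
}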
+// +// The following action is related to PutBucketNotificationConfiguration : +// +// [GetBucketNotificationConfiguration] +// +// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { params = &PutBucketNotificationConfigurationInput{} @@ -90,6 +108,7 @@ type PutBucketNotificationConfigurationInput struct { } func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -144,6 +163,9 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -159,6 +181,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -192,6 +226,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go index 94875b7554..ffa37aab9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:PutBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) -// . 
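As an illustration of the OwnershipControls request shape referenced in the next hunk, the following sketch sets bucket-owner-enforced ownership (which disables ACLs) on a placeholder bucket; the helper name and bucket are assumptions, not code from this vendor update.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enforceBucketOwner sets the bucket's Object Ownership to BucketOwnerEnforced
// so that the bucket owner automatically owns every object and ACLs no longer
// grant access.
func enforceBucketOwner(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{{
				ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
			}},
		},
	})
	return err
}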
The following operations are related to PutBucketOwnershipControls : -// - GetBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using object ownership]. +// +// The following operations are related to PutBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html +// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} @@ -51,9 +59,23 @@ type PutBucketOwnershipControlsInput struct { // This member is required. OwnershipControls *types.OwnershipControls - // The MD5 hash of the OwnershipControls request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm header sent. Otherwise, Amazon S3 fails the request + // with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] in the + // Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumAlgorithm types.ChecksumAlgorithm + + // The MD5 hash of the OwnershipControls request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -65,6 +87,7 @@ type PutBucketOwnershipControlsInput struct { } func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -119,6 +142,9 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -134,6 +160,21 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -173,6 +214,18 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -191,10 +244,21 @@ func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *a } } +// getPutBucketOwnershipControlsRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. +func getPutBucketOwnershipControlsRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*PutBucketOwnershipControlsInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ - GetAlgorithm: nil, + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getPutBucketOwnershipControlsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go index 88e3f2633f..1764f4928d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -15,48 +15,66 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - -// For directory buckets, you must make requests for this API operation to the -// Regional endpoint. These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . 
-// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the PutBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the PutBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:PutBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. 
+// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:PutBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following operations are related to PutBucketPolicy : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
+// +// The following operations are related to PutBucketPolicy : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { params = &PutBucketPolicyInput{} @@ -74,68 +92,90 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string - // The bucket policy as a JSON document. For directory buckets, the only IAM - // action supported in the bucket policy is s3express:CreateSession . + // The bucket policy as a JSON document. + // + // For directory buckets, the only IAM action supported in the bucket policy is + // s3express:CreateSession . // // This member is required. Policy *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. 
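Illustrative aside (not part of the vendored patch): the PutBucketPolicy hunks above keep the Bucket and Policy members and restructure their documentation. A minimal sketch of attaching a policy with the v2 client follows; the policy document, ARNs, and bucket name are placeholders, and the client is assumed to be built as in the earlier sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putExamplePolicy denies non-TLS access to a placeholder bucket.
func putExamplePolicy(ctx context.Context, client *s3.Client) error {
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Policy: aws.String(policy),           // the policy is sent as a JSON document
	})
	return err
}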
When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. This functionality is not supported - // for directory buckets. + // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. ConfirmRemoveSelfBucketAccess *bool - // The MD5 hash of the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. This functionality is not supported for directory - // buckets. + // The MD5 hash of the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). 
+ // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -190,6 +230,9 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -205,6 +248,21 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -244,6 +302,18 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -273,9 +343,10 @@ func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index bf59164c03..1b11ef3991 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -15,47 +15,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a replication -// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. Specify the replication configuration in the -// request body. In the replication configuration, you provide the name of the -// destination bucket or buckets where you want Amazon S3 to replicate objects, the -// IAM role that Amazon S3 can assume to replicate objects on your behalf, and -// other relevant information. 
You can invoke this request for a specific Amazon -// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) -// condition key. A replication configuration must include at least one rule, and -// can contain a maximum of 1,000. Each rule identifies a subset of objects to -// replicate by filtering the objects in the source bucket. To choose additional -// subsets of objects to replicate, add a rule for each subset. To specify a subset -// of the objects in the source bucket to apply a replication rule to, add the -// Filter element as a child of the Rule element. You can filter objects based on -// an object key prefix, one or more object tags, or both. When you add the Filter -// element in the configuration, you must also add the following elements: -// DeleteMarkerReplication , Status , and Priority . If you are using an earlier -// version of the replication configuration, Amazon S3 handles replication of -// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) -// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// This operation is not supported for directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see [Replication]in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets where +// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume +// to replicate objects on your behalf, and other relevant information. You can +// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] +// aws:RequestedRegion condition key. +// +// A replication configuration must include at least one rule, and can contain a +// maximum of 1,000. Each rule identifies a subset of objects to replicate by +// filtering the objects in the source bucket. To choose additional subsets of +// objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. When +// you add the Filter element in the configuration, you must also add the following +// elements: DeleteMarkerReplication , Status , and Priority . +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// For information about enabling versioning on a bucket, see [Using Versioning]. +// +// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't // replicate objects that are stored at rest using server-side encryption with KMS // keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: // SourceSelectionCriteria , SseKmsEncryptedObjects , Status , // EncryptionConfiguration , and ReplicaKmsKeyID . 
For information about -// replication configuration, see Replicating Objects Created with SSE Using KMS -// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) -// . For information on PutBucketReplication errors, see List of -// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. +// +// For information on PutBucketReplication errors, see [List of replication-related error codes] +// // Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. By default, a -// resource owner, in this case the Amazon Web Services account that created the -// bucket, can perform this operation. The resource owner can also grant others -// permissions to perform the operation. For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . To perform this operation, the user or role performing the action must have -// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. The following operations are related to PutBucketReplication : -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// s3:PutReplicationConfiguration permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account that +// created the bucket, can perform this operation. The resource owner can also +// grant others permissions to perform the operation. For more information about +// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// To perform this operation, the user or role performing the action must have the [iam:PassRole] +// permission. 
+// +// The following operations are related to PutBucketReplication : +// +// [GetBucketReplication] +// +// [DeleteBucketReplication] +// +// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion +// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html +// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { params = &PutBucketReplicationInput{} @@ -84,21 +108,27 @@ type PutBucketReplicationInput struct { // This member is required. ReplicationConfiguration *types.ReplicationConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +143,7 @@ type PutBucketReplicationInput struct { } func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -167,6 +198,9 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +216,21 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -221,6 +270,18 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -250,9 +311,10 @@ func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, b } func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go index 07e0f16399..a6d19c6099 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the request payment -// configuration for a bucket. By default, the bucket owner pays for downloads from -// the bucket. 
This configuration parameter enables the bucket owner (only) to -// specify that the person requesting the download will be charged for the -// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to PutBucketRequestPayment : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// This operation is not supported for directory buckets. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download will +// be charged for the download. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to PutBucketRequestPayment : +// +// [CreateBucket] +// +// [GetBucketRequestPayment] +// +// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { if params == nil { params = &PutBucketRequestPaymentInput{} @@ -50,21 +58,27 @@ type PutBucketRequestPaymentInput struct { // This member is required. RequestPaymentConfiguration *types.RequestPaymentConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
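Illustrative aside (not part of the vendored patch): the PutBucketRequestPayment hunks above show the required RequestPaymentConfiguration member. A minimal sketch of turning on Requester Pays follows; the bucket name is a placeholder and the client is assumed to be built as in the earlier sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableRequesterPays makes requesters, not the bucket owner, pay for downloads.
func enableRequesterPays(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			Payer: types.PayerRequester,
		},
	})
	return err
}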
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -76,6 +90,7 @@ type PutBucketRequestPaymentInput struct { } func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -130,6 +145,9 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -145,6 +163,21 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -184,6 +217,18 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -213,9 +258,10 @@ func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string } func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index 0f0a6fd406..db5e62dfd8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -15,39 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the tags for a -// bucket. Use tags to organize your Amazon Web Services bill to reflect your own -// cost structure. 
To do this, sign up to get your Amazon Web Services account bill -// with tag key values included. Then, to see the cost of combined resources, -// organize your billing information according to resources with the same tag key -// values. For example, you can tag several resources with a specific application -// name, and then organize your billing information to see the total cost of that -// application across several services. For more information, see Cost Allocation -// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . When this operation sets the tags for a bucket, it will overwrite any current +// This operation is not supported for directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill with +// tag key values included. Then, to see the cost of combined resources, organize +// your billing information according to resources with the same tag key values. +// For example, you can tag several resources with a specific application name, and +// then organize your billing information to see the total cost of that application +// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. +// +// When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. To use this operation, you must have permissions to -// perform the s3:PutBucketTagging action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// existing list of tags. +// +// To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . +// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. +// // - MalformedXML - The XML provided does not match the schema. 
+// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // bucket. // // The following operations are related to PutBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [GetBucketTagging] +// +// [DeleteBucketTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html +// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { params = &PutBucketTaggingInput{} @@ -75,21 +90,27 @@ type PutBucketTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
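Illustrative aside (not part of the vendored patch): per the documentation hunks above, PutBucketTagging replaces any existing tag set rather than appending to it. A minimal sketch follows; tag keys and values are placeholders and the client is assumed to be built as in the earlier sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// tagBucket overwrites the bucket's tag set with the tags below.
func tagBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("team"), Value: aws.String("infra")},        // placeholder values
				{Key: aws.String("cost-center"), Value: aws.String("1234")},
			},
		},
	})
	return err
}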
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -101,6 +122,7 @@ type PutBucketTaggingInput struct { } func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -155,6 +177,9 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -170,6 +195,21 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -209,6 +249,18 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -238,9 +290,10 @@ func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go index 495725cea9..aa30d5fad1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -15,28 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the versioning state -// of an existing bucket. You can set the versioning state with one of the -// following values: Enabled—Enables versioning for the objects in the bucket. 
All -// objects added to the bucket receive a unique version ID. Suspended—Disables -// versioning for the objects in the bucket. All objects added to the bucket -// receive the version ID null. If the versioning state has never been set on a -// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// request does not return a versioning state value. In order to enable MFA Delete, -// you must be the bucket owner. If you are the bucket owner and want to enable MFA -// Delete in the bucket versioning configuration, you must include the x-amz-mfa -// request header and the Status and the MfaDelete request elements in a request -// to set the versioning state of the bucket. If you have an object expiration -// lifecycle configuration in your non-versioned bucket and you want to maintain -// the same permanent delete behavior when you enable versioning, you must add a -// noncurrent expiration policy. The noncurrent expiration lifecycle configuration -// will manage the deletes of the noncurrent object versions in the version-enabled -// bucket. (A version-enabled bucket maintains one current and zero or more -// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) -// . The following operations are related to PutBucketVersioning : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported for directory buckets. +// +// When you enable versioning on a bucket for the first time, it might take a +// short amount of time for the change to be fully propagated. While this change is +// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for +// requests to objects created or updated after enabling versioning. We recommend +// that you wait for 15 minutes after enabling versioning before issuing write +// operations ( PUT or DELETE ) on objects in the bucket. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added to +// the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects added +// to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a [GetBucketVersioning]request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning +// configuration, you must include the x-amz-mfa request header and the Status and +// the MfaDelete request elements in a request to set the versioning state of the +// bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. 
The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see [Lifecycle and Versioning]. +// +// The following operations are related to PutBucketVersioning : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetBucketVersioning] +// +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { if params == nil { params = &PutBucketVersioningInput{} @@ -64,21 +90,27 @@ type PutBucketVersioningInput struct { // This member is required. VersioningConfiguration *types.VersioningConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // >The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
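Illustrative aside (not part of the vendored patch): the PutBucketVersioning hunks above describe the Enabled and Suspended states and add a note that newly enabled versioning can take a few minutes to propagate. A minimal sketch of enabling versioning follows; the bucket name is a placeholder and the client is assumed to be built as in the earlier sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableVersioning turns on object versioning for the bucket.
func enableVersioning(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled, // use ...Suspended to stop versioning new writes
		},
	})
	return err
}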
If the account ID that you provide @@ -94,6 +126,7 @@ type PutBucketVersioningInput struct { } func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -148,6 +181,9 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -163,6 +199,21 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -202,6 +253,18 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -231,9 +294,10 @@ func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 08c8a582f5..169b24bcca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -15,21 +15,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the configuration of -// the website that is specified in the website subresource. To configure a bucket -// as a website, you can add this subresource on the bucket with website -// configuration information such as the file name of the index document and any -// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Sets the configuration of the website that is specified in the website +// subresource. 
To configure a bucket as a website, you can add this subresource on +// the bucket with website configuration information such as the file name of the +// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. +// +// This PUT action requires the S3:PutBucketWebsite permission. By default, only // the bucket owner can configure the website attached to a bucket; however, bucket // owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect -// all website requests sent to the bucket's website endpoint, you add a website -// configuration with the following elements. Because all requests are sent to -// another website, you don't need to provide index document name for the bucket. +// bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you add +// a website configuration with the following elements. Because all requests are +// sent to another website, you don't need to provide index document name for the +// bucket. +// // - WebsiteConfiguration +// // - RedirectAllRequestsTo +// // - HostName +// // - Protocol // // If you want granular control over redirects, you can use the following elements @@ -37,27 +45,47 @@ import ( // information about the redirect destination. In this case, the website // configuration must provide an index document for the bucket, because some // requests might not be redirected. +// // - WebsiteConfiguration +// // - IndexDocument +// // - Suffix +// // - ErrorDocument +// // - Key +// // - RoutingRules +// // - RoutingRule +// // - Condition +// // - HttpErrorCodeReturnedEquals +// // - KeyPrefixEquals +// // - Redirect +// // - Protocol +// // - HostName +// // - ReplaceKeyPrefixWith +// // - ReplaceKeyWith +// // - HttpRedirectCode // // Amazon S3 has a limitation of 50 routing rules per website configuration. If // you require more than 50 routing rules, you can use object redirect. For more -// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. +// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. +// +// The maximum request length is limited to 128 KB. +// +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} @@ -85,21 +113,27 @@ type PutBucketWebsiteInput struct { // This member is required. WebsiteConfiguration *types.WebsiteConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. 
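Illustrative aside (not part of the vendored patch): the PutBucketWebsite documentation above lists the WebsiteConfiguration elements (IndexDocument/Suffix, ErrorDocument/Key, and optional routing rules). A minimal sketch of the simple index-plus-error-page case follows; the bucket name and document keys are placeholders and the client is assumed to be built as in the earlier sketch.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// configureWebsite serves index.html by default and error.html on errors.
func configureWebsite(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &types.WebsiteConfiguration{
			IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &types.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	return err
}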
Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -111,6 +145,7 @@ type PutBucketWebsiteInput struct { } func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -165,6 +200,9 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -180,6 +218,21 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -219,6 +272,18 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -248,9 +313,10 @@ func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return 
internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index d57e0026ef..24d7c9c69c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -18,51 +18,73 @@ import ( ) // Adds an object to a bucket. +// // - Amazon S3 never adds partial objects; if you receive a success response, // Amazon S3 added the entire object to the bucket. You cannot use PutObject to // only update a single piece of metadata for an existing object. You must put the // entire object with updated metadata if you want to update some values. +// // - If your bucket uses the bucket owner enforced setting for Object Ownership, // ACLs are disabled and no longer affect permissions. All objects written to the // bucket by any account will be owned by the bucket owner. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Amazon S3 is a distributed system. If it receives multiple write requests for // the same object simultaneously, it overwrites all but the last object written. // However, Amazon S3 provides features that can modify this behavior: +// // - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. +// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. +// +// - If-None-Match - Uploads the object only if the object key name does not +// already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 +// Precondition Failed error. If a conflicting operation occurs during the +// upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 +// failure, retry the upload. +// +// Expects the * character (asterisk). +// +// For more information, see [Add preconditions to S3 operations with conditional requests]in the Amazon S3 User Guide or [RFC 7232]. +// +// This functionality is not supported for S3 on Outposts. 
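As a sketch of the conditional-write behaviour described above (the asterisk matcher, the 412 Precondition Failed response, and the retryable 409 conflict), a create-if-absent upload might look like the following. The bucket, key, payload, and the exact error-code string matched are illustrative assumptions.

package s3example

import (
	"bytes"
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// putIfAbsent writes the object only when no object exists at the key. A 412
// Precondition Failed means another writer got there first; a 409
// ConditionalRequestConflict means a concurrent upload raced this one and the
// request can simply be retried.
func putIfAbsent(ctx context.Context, client *s3.Client, bucket, key string, data []byte) (created bool, err error) {
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        bytes.NewReader(data),
		IfNoneMatch: aws.String("*"), // only succeed if the key does not already exist
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "PreconditionFailed" {
			return false, nil // object already existed
		}
		return false, err
	}
	return true, nil
}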
+// // - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 // receives multiple write requests for the same object simultaneously, it stores // all versions of the objects. For each write request that is made to the same // object, Amazon S3 automatically generates a unique version ID of that object // being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see Adding Objects to -// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) -// in the Amazon S3 User Guide. For information about returning the versioning -// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// . This functionality is not supported for directory buckets. +// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User +// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] +// . +// +// This functionality is not supported for directory buckets. // // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your PutObject request includes specific headers. +// // - s3:PutObject - To successfully complete the PutObject request, you must // always have the s3:PutObject permission on a bucket to add an object to it. +// // - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject // request, you must have the s3:PutObjectAcl . +// // - s3:PutObjectTagging - To successfully set the tag-set with your PutObject // request, you must have the s3:PutObjectTagging . +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,24 +92,44 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Data integrity with Content-MD5 +// // - General purpose bucket - To ensure that data is not corrupted traversing // the network, use the Content-MD5 header. When you use this header, Amazon S3 // checks the object against the provided MD5 value and, if they do not match, // Amazon S3 returns an error. 
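A short sketch of the Content-MD5 end-to-end integrity check described above for general purpose buckets; the payload, bucket, and key are placeholders.

package s3example

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithContentMD5 sends the Base64 encoded MD5 digest of the body so that
// Amazon S3 rejects the request if the payload is corrupted in transit.
func putWithContentMD5(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	sum := md5.Sum(data)
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:     aws.String(bucket),
		Key:        aws.String(key),
		Body:       bytes.NewReader(data),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	return err
}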
Alternatively, when the object's ETag is its MD5 // digest, you can calculate the MD5 while putting the object to Amazon S3 and // compare the returned ETag to the calculated MD5 value. +// // - Directory bucket - This functionality is not supported for directory // buckets. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about -// related Amazon S3 APIs, see the following: -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For more information about related Amazon S3 APIs, see the following: +// +// [CopyObject] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html +// [Add preconditions to S3 operations with conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/ +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { if params == nil { params = &PutObjectInput{} @@ -105,31 +147,40 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -139,26 +190,33 @@ type PutObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. When adding a new object, you can use headers to - // grant ACL-based permissions to individual Amazon Web Services accounts or to - // predefined groups defined by Amazon S3. These permissions are then added to the - // ACL on the object. By default, all objects are private. Only the owner has full - // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) - // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses - // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and - // no longer affect permissions. Buckets that use this setting only accept PUT - // requests that don't specify an ACL or PUT requests that specify bucket owner - // full control ACLs, such as the bucket-owner-full-control canned ACL or an - // equivalent form of this ACL expressed in the XML format. PUT requests that - // contain other ACLs (for example, custom grants to certain Amazon Web Services - // accounts) fail and return a 400 error with the error code - // AccessControlListNotSupported . For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon + // S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions to + // individual Amazon Web Services accounts or to predefined groups defined by + // Amazon S3. These permissions are then added to the ACL on the object. By + // default, all objects are private. Only the owner has full access control. For + // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL + // expressed in the XML format. PUT requests that contain other ACLs (for example, + // custom grants to certain Amazon Web Services accounts) fail and return a 400 + // error with the error code AccessControlListNotSupported . For more information, + // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. 
+ // + // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Object data. @@ -166,103 +224,148 @@ type PutObjectInput struct { // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - // bucket-level settings for S3 Bucket Key. This functionality is not supported for - // directory buckets. + // + // General purpose buckets - Setting this header to true causes Amazon S3 to use + // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + // + // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) - // . + // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. + // + // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 CacheControl *string // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . 
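Relating back to the canned ACL and Bucket Key members above, a hedged sketch that grants the bucket owner full control on the new object and opts into an S3 Bucket Key for SSE-KMS; this only applies to general purpose buckets, and the names are placeholders.

package s3example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithOwnerACL uploads an object with the bucket-owner-full-control canned
// ACL (still accepted when Object Ownership is bucket owner enforced) and asks
// S3 to use a Bucket Key for SSE-KMS encryption.
func putWithOwnerACL(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:           aws.String(bucket),
		Key:              aws.String(key),
		Body:             bytes.NewReader(data),
		ACL:              types.ObjectCannedACLBucketOwnerFullControl,
		BucketKeyEnabled: aws.Bool(true),
	})
	return err
}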
+ // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string - // Specifies presentational information for the object. For more information, see - // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) - // . + // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. + // + // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) - // . + // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding ContentEncoding *string // The language the content is in. ContentLanguage *string // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) - // . + // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. 
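To illustrate the checksum members documented above, a sketch with two variants: letting the SDK calculate a CRC32 checksum, or supplying a caller-computed SHA-256 digest. Bucket and key names are placeholders.

package s3example

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithSDKChecksum lets the SDK calculate and send a CRC32 checksum for the body.
func putWithSDKChecksum(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(key),
		Body:              bytes.NewReader(data),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	return err
}

// putWithPrecomputedSHA256 supplies a Base64 encoded SHA-256 digest computed by the caller.
func putWithPrecomputedSHA256(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	sum := sha256.Sum256(data)
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		Body:           bytes.NewReader(data),
		ChecksumSHA256: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	return err
}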
+ // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // The Base64 encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to // verify that the data is the same data that was originally sent. Although it is // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see - // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // . The Content-MD5 header is required for any request to upload an object with a - // retention period configured using Amazon S3 Object Lock. For more information - // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // integrity check. For more information about REST request authentication, see [REST Authentication]. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ContentMD5 *string // A standard MIME type describing the format of the contents. For more - // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type ContentType *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -271,146 +374,281 @@ type PutObjectInput struct { ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more - // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. + // + // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. 
+ // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string + // Uploads the object only if the ETag (entity tag) value provided during the + // WRITE operation matches the ETag of the object in S3. If the ETag values do not + // match, the operation returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should fetch the + // object's ETag and retry the upload. + // + // Expects the ETag value as a string. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // Uploads the object only if the object key name does not already exist in the + // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure you should retry the + // upload. + // + // Expects the '*' (asterisk) character. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfNoneMatch *string + // A map of metadata to store with the object in S3. Metadata map[string]string // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to this object. This functionality - // is not supported for directory buckets. + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. This functionality is not supported for - // directory buckets. + // formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value is stored as object - // metadata and automatically gets passed on to Amazon Web Services KMS for future - // GetObject or CopyObject operations on this object. This value must be - // explicitly added during CopyObject operations. This functionality is not - // supported for directory buckets. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for object encryption. The value of this header is a + // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption + // context as key-value pairs. This value is stored as object metadata and + // automatically gets passed on to Amazon Web Services KMS for future GetObject + // operations on this object. + // + // General purpose buckets - This value must be explicitly added during CopyObject + // operations if you want an additional encryption context for your object. For + // more information, see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. 
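The SSECustomer* members above implement SSE-C (customer-provided keys). A hedged sketch, assuming the caller already holds a 256-bit key; the key material and names are illustrative, and, as noted above, SSE-C is not supported for directory buckets.

package s3example

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// putWithSSEC uploads an object encrypted with a customer-provided AES-256 key.
// S3 uses the key to encrypt the object and then discards it.
func putWithSSEC(ctx context.Context, client *s3.Client, bucket, key string, data, rawKey []byte) error {
	keyMD5 := md5.Sum(rawKey) // integrity check on the key itself
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 bytes.NewReader(data),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
	return err
}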
+ // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key - // Management Service (KMS) symmetric encryption customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms or + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or // x-amz-server-side-encryption:aws:kms:dsse , but do not provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not - // exist in the same account that's issuing the command, you must use the full ARN - // and not just the ID. This functionality is not supported for directory buckets. + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose - // buckets - You have four mutually exclusive options to protect data using - // server-side encryption in Amazon S3, depending on how you choose to manage the - // encryption keys. Specifically, the encryption key options are Amazon S3 managed - // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and - // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side - // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can - // optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see Using Server-Side - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is - // supported. 
+ // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // - General purpose buckets - You have four mutually exclusive options to + // protect data using server-side encryption in Amazon S3, depending on how you + // choose to manage the encryption keys. Specifically, the encryption key options + // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You + // can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 + // User Guide. + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. 
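For the SSE-KMS members discussed above, a sketch of a general purpose bucket upload that names a customer managed key, enables the S3 Bucket Key, and adds an encryption context. The key ARN and context values are placeholders.

package s3example

import (
	"bytes"
	"context"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWithSSEKMS uploads an object encrypted with a specific KMS key. The
// encryption context is passed as a Base64 encoded JSON document of key-value pairs.
func putWithSSEKMS(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(key),
		Body:                 bytes.NewReader(data),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-KEY-ID"),
		BucketKeyEnabled:     aws.Bool(true),
		SSEKMSEncryptionContext: aws.String(
			base64.StdEncoding.EncodeToString([]byte(`{"department":"example"}`))),
	})
	return err
}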
+ // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. - // - For directory buckets, only the S3 Express One Zone storage class is - // supported to store newly created objects. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // - Directory buckets only support EXPRESS_ONEZONE (the S3 Express One Zone + // storage class) in Availability Zones and ONEZONE_IA (the S3 One + // Zone-Infrequent Access storage class) in Dedicated Local Zones. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") This functionality is not supported for - // directory buckets. + // parameters. (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) - // in the Amazon S3 User Guide. In the following example, the request header sets - // the redirect to an object (anotherPage.html) in the same bucket: - // x-amz-website-redirect-location: /anotherPage.html In the following example, the - // request header sets the object redirect to another website: - // x-amz-website-redirect-location: http://www.example.com/ For more information - // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. 
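Tying together the StorageClass, Tagging, and WebsiteRedirectLocation members above, a sketch for a general purpose bucket; the key, tag-set, and redirect target are placeholders.

package s3example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putPageWithRedirect stores a page in STANDARD_IA, tags it, and redirects
// website requests for it to another key in the same bucket.
func putPageWithRedirect(ctx context.Context, client *s3.Client, bucket string, data []byte) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:                  aws.String(bucket),
		Key:                     aws.String("oldPage.html"),
		Body:                    bytes.NewReader(data),
		StorageClass:            types.StorageClassStandardIa,
		Tagging:                 aws.String("team=docs&stage=archived"), // URL query encoded tag-set
		WebsiteRedirectLocation: aws.String("/anotherPage.html"),
	})
	return err
}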
+ // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html WebsiteRedirectLocation *string + // Specifies the offset for appending data to existing objects in bytes. The + // offset must be equal to the size of the existing object being appended to. If no + // object exists, setting this header to 0 will create a new object. + // + // This functionality is only supported for objects in the Amazon S3 Express One + // Zone storage class in directory buckets. + WriteOffsetBytes *int64 + noSmithyDocumentSerde } func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -419,107 +657,152 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. 
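The new WriteOffsetBytes member introduced above enables appends for objects in directory buckets (S3 Express One Zone). A hedged sketch that appends by first reading the current object size with HeadObject; the bucket and key are placeholders, and the read-then-append pattern is an assumption about typical usage rather than something prescribed by this patch.

package s3example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// appendToObject appends data to an existing directory-bucket object. The
// write offset must equal the current object size; an offset of 0 creates the object.
func appendToObject(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	head, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:           aws.String(bucket),
		Key:              aws.String(key),
		Body:             bytes.NewReader(data),
		WriteOffsetBytes: head.ContentLength, // must match the existing object's size
	})
	return err
}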
When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This header is + // present if the object was uploaded with the CRC64NVME checksum algorithm, or if + // it was uploaded without a checksum (and Amazon S3 added the default checksum, + // CRC64NVME , to the uploaded object). For more information about how checksums + // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. 
Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string - // Entity tag for the uploaded object. General purpose buckets - To ensure that - // data is not corrupted traversing the network, for objects where the ETag is the - // MD5 digest of the object, you can calculate the MD5 while putting an object to - // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory - // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of - // the object. + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. For PutObject uploads, the checksum type is always + // FULL_OBJECT . You can use this header as a data integrity check to verify that + // the checksum type that is received is the same checksum that was specified. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing the + // network, for objects where the ETag is the MD5 digest of the object, you can + // calculate the MD5 while putting an object to Amazon S3 and compare the returned + // ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 + // digest of the object. ETag *string - // If the expiration is configured for the object (see - // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ) in the Amazon S3 User Guide, the response includes this header. It includes - // the expiry-date and rule-id key-value pairs that provide information about - // object expiration. The value of the rule-id is URL-encoded. This functionality - // is not supported for directory buckets. + // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User + // Guide, the response includes this header. It includes the expiry-date and + // rule-id key-value pairs that provide information about object expiration. The + // value of the rule-id is URL-encoded. 
+ // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This value is stored - // as object metadata and automatically gets passed on to Amazon Web Services KMS - // for future GetObject or CopyObject operations on this object. This - // functionality is not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + // This value is stored as object metadata and automatically gets passed on to + // Amazon Web Services KMS for future GetObject operations on this object. SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3. ServerSideEncryption types.ServerSideEncryption - // Version ID of the object. If you enable versioning for a bucket, Amazon S3 - // automatically generates a unique version ID for the object being stored. Amazon - // S3 returns this ID in the response. 
When you enable versioning for a bucket, if - // Amazon S3 receives multiple write requests for the same object simultaneously, - // it stores all of the objects. For more information about versioning, see Adding - // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) - // in the Amazon S3 User Guide. For information about returning the versioning - // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) - // . This functionality is not supported for directory buckets. + // The size of the object in bytes. This value is only be present if you append + // to an object. + // + // This functionality is only supported for objects in the Amazon S3 Express One + // Zone storage class in directory buckets. + Size *int64 + + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates a + // unique version ID for the object being stored. Amazon S3 returns this ID in the + // response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all of the + // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. + // + // This functionality is not supported for directory buckets. + // + // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html VersionId *string // Metadata pertaining to the operation's result. 
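For orientation, a minimal sketch of driving this operation through the v2 client: it uploads a small body with an explicit CRC32 checksum and reads back the response fields documented above (ETag, ChecksumCRC32, VersionId). The bucket and key literals are placeholders, not values defined anywhere in this patch.

package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"), // placeholder
		Key:               aws.String("example-key"),    // placeholder
		Body:              strings.NewReader("hello world"),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Pointer fields on the output may be nil; aws.ToString handles that.
	fmt.Println(aws.ToString(out.ETag), aws.ToString(out.ChecksumCRC32), aws.ToString(out.VersionId))
}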
@@ -571,6 +854,9 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -586,6 +872,21 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectValidationMiddleware(stack); err != nil { return err } @@ -628,6 +929,18 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -657,9 +970,10 @@ func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -703,6 +1017,8 @@ func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectI } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, c.client.addOperationPutObjectMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 08fea12c1b..b906aad734 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -14,87 +14,152 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Uses the acl subresource -// to set the access control list (ACL) permissions for a new or existing object in -// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an -// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 -// on Outposts. 
Depending on your application needs, you can choose to set the ACL -// on an object using either the request body or the headers. For example, if you -// have an existing application that updates a bucket ACL using the request body, -// you can continue to use that approach. For more information, see Access Control -// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions using -// one of the following methods: +// This operation is not supported for directory buckets. +// +// Uses the acl subresource to set the access control list (ACL) permissions for a +// new or existing object in an S3 bucket. You must have the WRITE_ACP permission +// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User +// Guide. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an object +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, you can +// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User +// Guide. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions using one of the following methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-ac l. If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use x-amz-acl // header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. 
For more information, see Access Control List -// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants list objects permission to the two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-read: -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-read header grants list objects +// +// permission to the two Amazon Web Services accounts identified by their email +// addresses. +// +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved -// to the CanonicalUser and, in a response to a GET Object acl request, appears as -// the CanonicalUser. Using email addresses to specify a grantee is only supported -// in the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. 
California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>lt;/Grantee> +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. // // Versioning The ACL of an object is set at the object version level. By default, // PUT sets the ACL of the current version of an object. To set the ACL of a -// different version, use the versionId subresource. The following operations are -// related to PutObjectAcl : -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// different version, use the versionId subresource. +// +// The following operations are related to PutObjectAcl : +// +// [CopyObject] +// +// [GetObject] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { params = &PutObjectAclInput{} @@ -113,22 +178,27 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. 
The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,8 +208,9 @@ type PutObjectAclInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // . + // The canned ACL to apply to the object. For more information, see [Canned ACL]. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL ACL types.ObjectCannedACL // Contains the elements that set the ACL permissions for an object per grantee. @@ -149,17 +220,23 @@ type PutObjectAclInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864.> (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.>] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.>]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -168,44 +245,54 @@ type PutObjectAclInput struct { ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for Amazon S3 on Outposts. + // bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for Amazon S3 on Outposts. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // Amazon S3 on Outposts. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. 
+ // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -214,7 +301,12 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -266,6 +358,9 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -281,6 +376,21 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { return err } @@ -320,6 +430,18 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -349,9 +471,10 @@ func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index cc23509f8b..eefb409188 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -14,9 +14,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Applies a legal hold -// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. +// This operation is not supported for directory buckets. +// +// Applies a legal hold configuration to the specified object. For more +// information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,15 +40,19 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. - // Access points - When you use this action with an access point, you must provide - // the alias of the access point in place of the bucket name or specify the access - // point ARN. When using the access point ARN, you must direct requests to the - // access point hostname. The access point hostname takes the form + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -57,15 +66,19 @@ type PutObjectLegalHoldInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -81,10 +94,12 @@ type PutObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. @@ -94,6 +109,7 @@ type PutObjectLegalHoldInput struct { } func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -101,7 +117,12 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
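A minimal sketch of calling this operation, assuming the same imports and client construction as the PutObject sketch earlier in this patch; the bucket, key, and version ID are placeholders.

// putLegalHold places a legal hold on one object version.
func putLegalHold(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
		Bucket:    aws.String("example-bucket"),  // placeholder
		Key:       aws.String("example-key"),     // placeholder
		VersionId: aws.String("example-version"), // placeholder
		LegalHold: &types.ObjectLockLegalHold{
			Status: types.ObjectLockLegalHoldStatusOn,
		},
	})
	return err
}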
@@ -153,6 +174,9 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +192,21 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -207,6 +246,18 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +287,10 @@ func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index 358ececc6d..48240d62d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -14,17 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object Lock -// configuration on the specified bucket. The rule specified in the Object Lock -// configuration will be applied by default to every new object placed in the -// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . +// This operation is not supported for directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see [Locking Objects]. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. +// // - You can enable Object Lock for new or existing buckets. 
For more -// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) -// . +// information, see [Configuring Object Lock]. +// +// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -51,15 +56,19 @@ type PutObjectLockConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -74,10 +83,12 @@ type PutObjectLockConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. 
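The DefaultRetention constraints above (a mode plus exactly one of Days or Years) are easiest to see in code. A minimal sketch, reusing the imports and client from the PutObject sketch; the bucket name is a placeholder.

// putLockConfiguration enables Object Lock defaults: GOVERNANCE mode for 30 days.
func putLockConfiguration(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ObjectLockConfiguration: &types.ObjectLockConfiguration{
			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
			Rule: &types.ObjectLockRule{
				DefaultRetention: &types.DefaultRetention{
					Mode: types.ObjectLockRetentionModeGovernance,
					Days: aws.Int32(30), // specify Days or Years, not both
				},
			},
		},
	})
	return err
}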
@@ -87,6 +98,7 @@ type PutObjectLockConfigurationInput struct { } func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -94,7 +106,12 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -146,6 +163,9 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -161,6 +181,21 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -200,6 +235,18 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -229,9 +276,10 @@ func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (str } func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index eb787de489..cf7f4f44b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -14,12 +14,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by 
directory buckets. Places an Object -// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . Users or accounts require the s3:PutObjectRetention permission in order to -// place an Object Retention configuration on objects. Bypassing a Governance +// This operation is not supported for directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order +// to place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. +// // This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -38,15 +42,20 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object - // Retention configuration to. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // Retention configuration to. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -64,15 +73,19 @@ type PutObjectRetentionInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -84,10 +97,12 @@ type PutObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The container element for the Object Retention configuration. @@ -101,6 +116,7 @@ type PutObjectRetentionInput struct { } func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -108,7 +124,12 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
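A minimal sketch of setting a per-object retention period, again reusing the imports (plus the standard time package) and client from the PutObject sketch; the bucket, key, and retain-until date are placeholders. Shortening or removing an existing GOVERNANCE-mode lock would additionally require the s3:BypassGovernanceRetention permission noted above.

// putRetention locks one object in GOVERNANCE mode for roughly 30 days.
func putRetention(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	return err
}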
@@ -160,6 +181,9 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -175,6 +199,21 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -214,6 +253,18 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -243,9 +294,10 @@ func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index 2768db5025..bd680b2141 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -14,35 +14,50 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the supplied tag-set -// to an object that already exists in a bucket. A tag is a key-value pair. For -// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . You can associate tags with an object by sending a PUT request against the +// This operation is not supported for directory buckets. +// +// Sets the supplied tag-set to an object that already exists in a bucket. A tag +// is a key-value pair. For more information, see [Object Tagging]. +// +// You can associate tags with an object by sending a PUT request against the // tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// . 
For tagging-related restrictions related to characters and encodings, see Tag -// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// sending a GET request. For more information, see [GetObjectTagging]. +// +// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions]. +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// // To use this operation, you must have permission to perform the // s3:PutObjectTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. To put tags of any other version, use the -// versionId query parameter. You also need permission for the -// s3:PutObjectVersionTagging action. PutObjectTagging has the following special -// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// can grant this permission to others. +// +// To put tags of any other version, use the versionId query parameter. You also +// need permission for the s3:PutObjectVersionTagging action. +// +// PutObjectTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Object -// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . +// the tag did not pass input validation. For more information, see [Object Tagging]. +// // - MalformedXML - The XML provided does not match the schema. +// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // object. // // The following operations are related to PutObjectTagging : -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// [GetObjectTagging] +// +// [DeleteObjectTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html +// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { if params == nil { params = &PutObjectTaggingInput{} @@ -60,23 +75,28 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI type PutObjectTaggingInput struct { - // The bucket name containing the object. Access points - When you use this action - // with an access point, you must provide the alias of the access point in place of - // the bucket name or specify the access point ARN. When using the access point - // ARN, you must direct requests to the access point hostname. 
The access point - // hostname takes the form + // The bucket name containing the object. + // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -95,15 +115,19 @@ type PutObjectTaggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. 
For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -115,10 +139,12 @@ type PutObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -128,6 +154,7 @@ type PutObjectTaggingInput struct { } func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -186,6 +213,9 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -201,6 +231,21 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -240,6 +285,18 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -269,9 +326,10 @@ func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, 
RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index 7e6d0788e4..114bcc1498 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -15,22 +15,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the // account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to PutPublicAccessBlock : -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. 
+// +// The following operations are related to PutPublicAccessBlock : +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { params = &PutPublicAccessBlockInput{} @@ -56,9 +72,10 @@ type PutPublicAccessBlockInput struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see The - // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon S3 User Guide. + // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + // the Amazon S3 User Guide. + // + // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status // // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration @@ -67,15 +84,19 @@ type PutPublicAccessBlockInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutPublicAccessBlock request body. For requests made using - // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services - // SDKs, this field is calculated automatically. + // The MD5 hash of the PutPublicAccessBlock request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -87,6 +108,7 @@ type PutPublicAccessBlockInput struct { } func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -141,6 +163,9 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -156,6 +181,21 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -195,6 +235,18 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -224,9 +276,10 @@ func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, b } func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index 3b6aad85b8..2382537a8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -14,29 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Restores an archived copy -// of an object back into Amazon S3 This functionality is not supported for Amazon -// S3 on Outposts. This action performs the following types of requests: +// This operation is not supported for directory buckets. +// +// # Restores an archived copy of an object back into Amazon S3 +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// This action performs the following types of requests: +// // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the // following: -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide -// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide +// +// [PutObject] +// +// [Managing Access with ACLs] +// - in the Amazon S3 User Guide +// +// [Protecting Data Using Server-Side Encryption] +// - in the Amazon S3 User Guide // // Permissions To use this operation, you must have permissions to perform the // s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the -// S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive -// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers, are not accessible in real time. For objects in the S3 Glacier -// Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval +// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or +// S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For +// objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage // classes, you must first initiate a restore request, and then wait until a // temporary copy of the object is available. If you want a permanent copy of the // object, create a copy of it in the Amazon S3 Standard storage class in your S3 @@ -44,61 +51,70 @@ import ( // duration (number of days) that you specify. For objects in the Archive Access or // Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a // restore request, and then wait until the object is moved into the Frequent -// Access tier. To restore a specific object version, you can provide a version ID. -// If you don't provide a version ID, Amazon S3 restores the current version. When -// restoring an archived object, you can specify one of the following data access -// tier options in the Tier element of the request body: +// Access tier. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. 
+// +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: +// // - Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or -// S3 Intelligent-Tiering Archive tier when occasional urgent requests for -// restoring archives are required. For all but the largest archived objects (250 -// MB+), data accessed using Expedited retrievals is typically made available -// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for -// Expedited retrievals is available when you need it. Expedited retrievals and -// provisioned capacity are not available for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// stored in the S3 Glacier Flexible Retrieval storage class or S3 +// Intelligent-Tiering Archive tier when occasional urgent requests for restoring +// archives are required. For all but the largest archived objects (250 MB+), data +// accessed using Expedited retrievals is typically made available within 1–5 +// minutes. Provisioned capacity ensures that retrieval capacity for Expedited +// retrievals is available when you need it. Expedited retrievals and provisioned +// capacity are not available for objects stored in the S3 Glacier Deep Archive +// storage class or S3 Intelligent-Tiering Deep Archive tier. +// // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval requests // that do not specify the retrieval option. Standard retrievals typically finish -// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They -// typically finish within 12 hours for objects stored in the S3 Glacier Deep -// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard -// retrievals are free for objects stored in S3 Intelligent-Tiering. +// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage +// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 +// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 +// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects +// stored in S3 Intelligent-Tiering. +// // - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible // Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve // large amounts, even petabytes, of data at no cost. Bulk retrievals typically // finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval -// Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk -// retrievals are also the lowest-cost retrieval option when restoring objects from -// S3 Glacier Deep Archive. They typically finish within 48 hours for objects -// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering -// Deep Archive tier. +// storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also +// the lowest-cost retrieval option when restoring objects from S3 Glacier Deep +// Archive. They typically finish within 48 hours for objects stored in the S3 +// Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. 
// // For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to -// change the restore speed to a faster speed while it is in progress. For more -// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon S3 User Guide. To get the status of object restoration, you can -// send a HEAD request. Operations return the x-amz-restore header, which provides -// information about the restoration status, in the response. You can use Amazon S3 -// event notifications to notify you when a restore is initiated or completed. For -// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon S3 User Guide. After restoring an archived object, you can update -// the restoration period by reissuing the request with a new period. Amazon S3 -// updates the restoration period relative to the current time and charges only for -// the request-there are no data transfer charges. You cannot update the -// restoration period when Amazon S3 is actively processing your current restore -// request for the object. If your bucket has a lifecycle configuration with a rule -// that includes an expiration action, the object expiration overrides the life -// span that you specify in a restore request. For example, if you restore an -// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon -// S3 deletes the object in 3 days. For more information about lifecycle -// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon S3 User Guide. Responses A successful action returns either the 200 OK -// or 202 Accepted status code. +// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to a +// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon +// S3 User Guide. +// +// To get the status of object restoration, you can send a HEAD request. +// Operations return the x-amz-restore header, which provides information about +// the restoration status, in the response. You can use Amazon S3 event +// notifications to notify you when a restore is initiated or completed. For more +// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. +// +// After restoring an archived object, you can update the restoration period by +// reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there are +// no data transfer charges. You cannot update the restoration period when Amazon +// S3 is actively processing your current restore request for the object. 
+// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy for 10 +// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the +// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] +// in Amazon S3 User Guide. +// +// Responses A successful action returns either the 200 OK or 202 Accepted status +// code. // // - If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -128,8 +144,22 @@ import ( // - SOAP Fault Code Prefix: N/A // // The following operations are related to RestoreObject : -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketNotificationConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { if params == nil { params = &RestoreObjectInput{} @@ -147,23 +177,28 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. Access points - When you use - // this action with an access point, you must provide the alias of the access point - // in place of the bucket name or specify the access point ARN. When using the - // access point ARN, you must direct requests to the access point hostname. The - // access point hostname takes the form + // The bucket name containing the object to restore. 
+ // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -177,10 +212,13 @@ type RestoreObjectInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -192,10 +230,12 @@ type RestoreObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Container for restore job parameters. @@ -208,6 +248,7 @@ type RestoreObjectInput struct { } func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -215,7 +256,12 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -271,6 +317,9 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -286,6 +335,21 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { return err } @@ -322,6 +386,18 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -351,9 +427,10 @@ func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func 
addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getRestoreObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go index f69db696ac..2dfa63c6e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -14,67 +14,94 @@ import ( "sync" ) -// This operation is not supported by directory buckets. This action filters the -// contents of an Amazon S3 object based on a simple structured query language -// (SQL) statement. In the request, along with the SQL expression, you must also -// specify a data serialization format (JSON, CSV, or Apache Parquet) of the -// object. Amazon S3 uses this format to parse object data into records, and -// returns only records that match the specified SQL expression. You must also -// specify the data serialization format for the response. This functionality is -// not supported for Amazon S3 on Outposts. For more information about Amazon S3 -// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject -// permission for this operation. Amazon S3 Select does not support anonymous -// access. For more information about permissions, see Specifying Permissions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to -// query objects that have the following format properties: +// This operation is not supported for directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the SQL +// expression, you must also specify a data serialization format (JSON, CSV, or +// Apache Parquet) of the object. Amazon S3 uses this format to parse object data +// into records, and returns only records that match the specified SQL expression. +// You must also specify the data serialization format for the response. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User +// Guide. +// +// Permissions You must have the s3:GetObject permission for this operation. +// Amazon S3 Select does not support anonymous access. For more information about +// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. +// +// Object Data Formats You can use Amazon S3 Select to query objects that have the +// following format properties: +// // - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. 
+// // - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// // - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. // GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports // for CSV and JSON files. Amazon S3 Select supports columnar compression for // Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object // compression for Parquet objects. +// // - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. For objects that are encrypted with -// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use -// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 -// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. For -// more information about server-side encryption, including SSE-S3 and SSE-KMS, see -// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide. +// are protected with server-side encryption. +// +// For objects that are encrypted with customer-provided encryption keys (SSE-C), +// +// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. +// +// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon +// +// Web Services KMS keys (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information about +// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 +// User Guide. // // Working with the Response Body Given the response size is unknown, Amazon S3 // Select streams the response as a series of messages and includes a // Transfer-Encoding header with chunked as its value in the response. For more -// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) -// . GetObject Support The SelectObjectContent action does not support the -// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . +// information, see [Appendix: SelectObjectContent Response]. +// +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see [GetObject]. +// // - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) -// in the request parameters), you cannot specify the range of bytes of an object -// to return. 
+// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of +// bytes of an object to return. +// // - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the // ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING // storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or // REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or // DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) -// in the Amazon S3 User Guide. +// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. +// +// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] // -// Special Errors For a list of special errors for this operation, see List of -// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // The following operations are related to SelectObjectContent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// [GetObject] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration] +// +// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html +// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange +// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html +// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { params = &SelectObjectContentInput{} @@ -90,14 +117,18 @@ func (c *Client) SelectObjectContent(ctx 
context.Context, params *SelectObjectCo return out, nil } +// Learn Amazon S3 Select is no longer available to new customers. Existing +// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Request to filter the contents of an Amazon S3 object based on a simple // Structured Query Language (SQL) statement. In the request, along with the SQL // expression, you must specify a data serialization format (JSON or CSV) of the // object. Amazon S3 uses this to parse object data into records. It returns only // records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see S3Select API -// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) -// . +// serialization format for the response. For more information, see [S3Select API Documentation]. +// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html type SelectObjectContentInput struct { // The S3 bucket. @@ -140,30 +171,37 @@ type SelectObjectContentInput struct { // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. ScanRange may be - // used in the following ways: + // 14.35.1 about how to specify the start and end of the range. 
+ // + // ScanRange may be used in the following ways: + // // - 50100 - process only the records starting between the bytes 50 and 100 // (inclusive, counting from zero) + // // - 50 - process only the records starting after the byte 50 + // // - 50 - process only the records within the last 50 bytes of the file. ScanRange *types.ScanRange @@ -171,6 +209,7 @@ type SelectObjectContentInput struct { } func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -235,6 +274,9 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -244,6 +286,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { return err } @@ -277,6 +331,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index ff73197949..c486ec3d57 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -16,58 +16,84 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide new data -// as a part of an object in your request. However, you have an option to specify -// your existing Amazon S3 object as a data source for the part you are uploading. -// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// ) before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier that you must include in your -// upload part request. Part numbers can be any number from 1 to 10,000, inclusive. -// A part number uniquely identifies a part and also defines its position within -// the object being created. If you upload a new part using the same part number -// that was used with a previous part, the previously uploaded part is overwritten. -// For information about maximum and minimum part sizes and other multipart upload -// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. 
After you initiate multipart upload and upload one -// or more parts, you must either complete or abort multipart upload in order to -// stop getting charged for storage of the uploaded parts. Only after you either -// complete or abort multipart upload, Amazon S3 frees up the parts storage and -// stops charging you for the parts storage. For more information on multipart -// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information on the permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// Uploads a part in a multipart upload. // -// Data integrity General purpose bucket - To ensure that data is not corrupted +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as a data +// source for the part you are uploading. To upload a part from an existing object, +// you use the [UploadPartCopy]operation. +// +// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In +// response to your initiate request, Amazon S3 returns an upload ID, a unique +// identifier that you must include in your upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object being +// created. If you upload a new part using the same part number that was used with +// a previous part, the previously uploaded part is overwritten. +// +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. 
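Editorial sketch, not vendored SDK code: the initiate, upload-part, and complete sequence described here, including the abort path that the documentation below says stops storage charges for already-uploaded parts. It assumes the imports from the previous sketch plus bytes; the helper name, bucket, key, and payloads are placeholders, and in a real upload every part except the last must be at least 5 MiB.

// uploadInTwoParts walks one object through CreateMultipartUpload, UploadPart,
// and CompleteMultipartUpload, aborting the upload if a part fails.
func uploadInTwoParts(ctx context.Context, client *s3.Client) error {
	bucket := aws.String("example-bucket") // placeholder
	key := aws.String("big/object.bin")    // placeholder

	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		return err
	}

	// Tiny payloads only illustrate the call shape; real non-final parts need >= 5 MiB.
	payloads := [][]byte{[]byte("first part"), []byte("second part")}

	var completed []types.CompletedPart
	for i, payload := range payloads {
		partNumber := aws.Int32(int32(i + 1)) // part numbers are 1-based
		up, err := client.UploadPart(ctx, &s3.UploadPartInput{
			Bucket:     bucket,
			Key:        key,
			UploadId:   mpu.UploadId,
			PartNumber: partNumber,
			Body:       bytes.NewReader(payload),
		})
		if err != nil {
			// Abort so the parts already stored stop accruing charges.
			_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
				Bucket: bucket, Key: key, UploadId: mpu.UploadId,
			})
			return err
		}
		completed = append(completed, types.CompletedPart{ETag: up.ETag, PartNumber: partNumber})
	}

	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          bucket,
		Key:             key,
		UploadId:        mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{Parts: completed},
	})
	return err
}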
+// +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged for +// storage of the uploaded parts. Only after you either complete or abort multipart +// upload, Amazon S3 frees up the parts storage and stops charging you for the +// parts storage. +// +// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using an Key Management Service key, the requester must have +// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The +// requester must also have permissions for the kms:GenerateDataKey action for +// the CreateMultipartUpload API. Then, the requester needs permissions for the +// kms:Decrypt action on the UploadPart and UploadPartCopy APIs. +// +// These permissions are required because Amazon S3 must decrypt and read data +// +// from the encrypted file parts before it completes the multipart upload. For more +// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For +// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions] +// and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// Data integrity General purpose bucket - To ensure that data is not corrupted // traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. 
If the upload request is signed with // Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see -// Authenticating Requests: Using the Authorization Header (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) -// . Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. Encryption +// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. +// +// Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. +// +// Encryption // - General purpose bucket - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. You have mutually exclusive options to @@ -78,37 +104,76 @@ import ( // encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally // tell Amazon S3 to encrypt data at rest using server-side encryption with other // key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is -// supported by the S3 Multipart Upload operations. Unless you are using a -// customer-provided encryption key (SSE-C), you don't need to specify the -// encryption parameters in each UploadPart request. Instead, you only need to -// specify the server-side encryption parameters in the initial Initiate Multipart -// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . If you request server-side encryption using a customer-provided encryption key -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 -// - Directory bucket - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// (SSE-KMS) or provide your own encryption key (SSE-C). +// +// Server-side encryption is supported by the S3 Multipart Upload operations. +// +// Unless you are using a customer-provided encryption key (SSE-C), you don't need +// to specify the encryption parameters in each UploadPart request. Instead, you +// only need to specify the server-side encryption parameters in the initial +// Initiate Multipart request. For more information, see [CreateMultipartUpload]. +// +// If you request server-side encryption using a customer-provided encryption key +// +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. 
+// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). +// +// Special errors // -// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Special errors // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to UploadPart : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPart : +// +// [CreateMultipartUpload] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CreateSession]: 
https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { if params == nil { params = &UploadPartInput{} @@ -126,31 +191,40 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. 
+ // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -178,50 +252,67 @@ type UploadPartInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // This checksum algorithm must be the same for all parts and it match the + // checksum value supplied in the CreateMultipartUpload request. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Size of the body in bytes. This parameter is useful when the size of the body // cannot be determined automatically. ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. This functionality is not supported for - // directory buckets. + // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. 
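Editorial sketch, not vendored SDK code: supplying a precomputed, Base64 encoded CRC32 checksum for a part as described above, together with the ExpectedBucketOwner guard whose documentation continues below. It assumes imports of bytes, encoding/base64, encoding/binary, and hash/crc32 in addition to the earlier ones; the helper name, bucket, key, payload, and account ID are placeholders.

// uploadPartWithPrecomputedCRC32 sends the part checksum itself instead of
// asking the SDK to compute one via ChecksumAlgorithm.
func uploadPartWithPrecomputedCRC32(ctx context.Context, client *s3.Client, uploadID string) error {
	payload := []byte("part data") // placeholder
	crc := crc32.ChecksumIEEE(payload)
	var sum [4]byte
	binary.BigEndian.PutUint32(sum[:], crc)

	_, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:        aws.String("example-bucket"),
		Key:           aws.String("big/object.bin"),
		UploadId:      aws.String(uploadID),
		PartNumber:    aws.Int32(1),
		Body:          bytes.NewReader(payload),
		ChecksumCRC32: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
		// Rejects the write if the bucket is owned by a different account.
		ExpectedBucketOwner: aws.String("111122223333"), // placeholder account ID
	})
	return err
}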
If the account ID that you provide @@ -233,14 +324,17 @@ type UploadPartInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -248,20 +342,23 @@ type UploadPartInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header . This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported for directory buckets. + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -270,73 +367,92 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
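Editorial sketch, not vendored SDK code: the SSECustomerAlgorithm, SSECustomerKey, and SSECustomerKeyMD5 fields documented above carry the same customer-provided key that was given when the multipart upload was initiated. The key below is a fake placeholder, the helper name is hypothetical, and crypto/md5, encoding/base64, and strings are assumed to be imported.

// uploadPartWithSSEC repeats the customer-provided encryption key (SSE-C) on a
// single part upload; Amazon S3 never stores this key.
func uploadPartWithSSEC(ctx context.Context, client *s3.Client, uploadID string) error {
	rawKey := bytes.Repeat([]byte("k"), 32) // fake 256-bit key, for illustration only
	keyMD5 := md5.Sum(rawKey)

	_, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("big/object.bin"),
		UploadId:             aws.String(uploadID),
		PartNumber:           aws.Int32(1),
		Body:                 strings.NewReader("part data"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
	return err
}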
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. 
This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag for the uploaded object. ETag *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
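Editorial sketch, not vendored SDK code: the ETag and per-part checksum values returned in UploadPartOutput, as described above, are normally fed back into CompleteMultipartUpload so that Amazon S3 can verify the assembled object. The bucket, key, and helper name are placeholders, and the parts slice is assumed to be in upload order.

// completeWithChecksums copies each part's ETag and CRC32 checksum from the
// UploadPart responses into the final CompleteMultipartUpload request.
func completeWithChecksums(ctx context.Context, client *s3.Client, uploadID string, parts []*s3.UploadPartOutput) error {
	var completed []types.CompletedPart
	for i, p := range parts {
		completed = append(completed, types.CompletedPart{
			PartNumber:    aws.Int32(int32(i + 1)),
			ETag:          p.ETag,
			ChecksumCRC32: p.ChecksumCRC32, // set only when the part carried a CRC32 checksum
		})
	}
	_, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String("example-bucket"),
		Key:             aws.String("big/object.bin"),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{Parts: completed},
	})
	return err
}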
SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. @@ -388,6 +504,9 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -403,6 +522,21 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartValidationMiddleware(stack); err != nil { return err } @@ -445,6 +579,18 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -474,9 +620,10 @@ func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) { } func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getUploadPartRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -521,6 +668,8 @@ func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPar } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns, c.client.addOperationUploadPartMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index d42dc60cd3..56a015bcdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -18,90 +18,166 @@ import ( // Uploads a part by copying data from an existing object as data source. To // specify the data source, you add the request header x-amz-copy-source in your // request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. For information about maximum and -// minimum part sizes and other multipart upload specifications, see Multipart -// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. Instead of copying data from an existing object as -// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// action to upload new data as a part of an object in your request. You must -// initiate a multipart upload before you can upload any part. In response to your -// initiate request, Amazon S3 returns the upload ID, a unique identifier that you -// must include in your upload part request. For conceptual information about -// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. For information about copying objects using a -// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use IAM credentials to authenticate and authorize +// x-amz-copy-source-range in your request. +// +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use the [UploadPart] +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. In +// response to your initiate request, Amazon S3 returns the upload ID, a unique +// identifier that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User +// Guide. For information about copying objects using a single atomic action vs. a +// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. 
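Editorial sketch, not vendored SDK code: UploadPartCopy as described at the start of this operation's documentation, where an existing object plus a byte range (the x-amz-copy-source and x-amz-copy-source-range headers) supplies the data for one part of the destination upload. Bucket names, the key, the upload ID, and the helper name are placeholders.

// copyPartFromExistingObject copies the first 5 MiB of an existing source
// object into part 1 of an in-progress multipart upload.
func copyPartFromExistingObject(ctx context.Context, client *s3.Client, uploadID string) error {
	_, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"),
		Key:             aws.String("big/object.bin"),
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int32(1),
		CopySource:      aws.String("source-bucket/reports/january.pdf"),
		CopySourceRange: aws.String("bytes=0-5242879"), // first 5 MiB, inclusive
	})
	return err
}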
These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Authentication and authorization All UploadPartCopy requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions You must have READ access to the source object and WRITE access to // the destination bucket. +// // - General purpose bucket permissions - You must have the permissions in a // policy based on the bucket types of your source bucket and destination bucket in // an UploadPartCopy operation. +// // - If the source object is in a general purpose bucket, you must have the // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have the // s3:PutObject permission to write the object copy to the destination bucket. -// For information about permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// +// - To perform a multipart upload with encryption using an Key Management +// Service key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey actions on the key. The requester must also have +// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload +// API. Then, the requester needs permissions for the kms:Decrypt action on the +// UploadPart and UploadPartCopy APIs. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before it +// completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS] +// in the Amazon S3 User Guide. For information about the permissions required to +// use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in an UploadPartCopy operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a -// policy to read the object . By default, the session is in the ReadWrite mode. 
+// policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination. For example policies, see Example -// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Encryption +// // - General purpose buckets - For information about using server-side // encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// . -// - Directory buckets - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// operation, see [CopyObject]and [UploadPart]. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. +// +// S3 Bucket Keys aren't supported, when you copy SSE-KMS encrypted objects from +// +// general purpose buckets to directory buckets, from directory buckets to general +// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon +// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted +// object. // // Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - Error Code: InvalidRequest +// // - Description: The specified copy source is not supported as a byte-range // copy source. 
+// // - HTTP Status Code: 400 Bad Request // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to UploadPartCopy : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPartCopy : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// 
[UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { if params == nil { params = &UploadPartCopyInput{} @@ -119,43 +195,60 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . 
+ // + // Access points - When you use this action with an access point for general + // purpose buckets, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When you use this action with an + // access point for directory buckets, you must provide the access point name in + // place of the bucket name. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points are not supported by directory buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string // Specifies the source object for the copy operation. You specify the value in // one of two formats, depending on whether you want to access the source object - // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the bucket // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must // be URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -163,28 +256,39 @@ type UploadPartCopyInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. 
+ // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your bucket has versioning enabled, you could have multiple versions of the // same object. By default, x-amz-copy-source identifies the current version of // the source object to copy. To copy a specific version of the source object to // copy, append ?versionId= to the x-amz-copy-source request header (for example, // x-amz-copy-source: // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If the current version is a delete marker and you don't specify a versionId - // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found - // error, because the object does not exist. If you specify versionId in the + // ). + // + // If the current version is a delete marker and you don't specify a versionId in + // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, + // because the object does not exist. If you specify versionId in the // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . Directory buckets - S3 - // Versioning isn't enabled and supported for directory buckets. + // marker as a version for the x-amz-copy-source . + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -205,34 +309,56 @@ type UploadPartCopyInput struct { // This member is required. UploadId *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both of - // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since - // headers are present in the request as follows: x-amz-copy-source-if-none-match - // condition evaluates to false , and; x-amz-copy-source-if-modified-since - // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed - // response code. + // Copies the object if it has been modified since the specified time. 
+ // + // If both of the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both of the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both of the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: x-amz-copy-source-if-none-match condition evaluates to false , and; - // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 - // returns 412 Precondition Failed response code. + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time // The range of bytes to copy from the source object. The range value must use the @@ -243,20 +369,26 @@ type UploadPartCopyInput struct { CopySourceRange *string // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). This functionality is not supported when the source object is in a - // directory bucket. + // AES256 ). + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. This functionality is not supported - // when the source object is in a directory bucket. + // was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the source object is in a directory bucket. + // encryption key was transmitted without error. 
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -273,15 +405,18 @@ type UploadPartCopyInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported when the destination bucket is a - // directory bucket. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -289,21 +424,25 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported when the destination bucket is a directory + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } @@ -311,42 +450,47 @@ func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. 
CopyPartResult *types.CopyPartResult // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. This functionality is not supported when the - // source object is in a directory bucket. + // versioning on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
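Reviewer note (not part of the vendored patch): the hunk above only reflows the generated documentation for UploadPartCopyInput/UploadPartCopyOutput; the call shape of the operation is unchanged by the v2 migration. A minimal sketch of invoking it with the v2 client follows, assuming a placeholder destination bucket and key, a placeholder upload ID obtained from CreateMultipartUpload, and a 5 MiB source byte range:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // copyPart copies one part of a multipart upload from an existing source
    // object. Bucket, key, and upload ID are placeholders for illustration only.
    func copyPart(ctx context.Context, client *s3.Client, uploadID string) error {
    	out, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
    		Bucket:          aws.String("amzn-s3-demo-bucket"),                    // destination bucket (placeholder)
    		Key:             aws.String("large-object"),                           // destination key (placeholder)
    		UploadId:        aws.String(uploadID),                                 // from a prior CreateMultipartUpload
    		PartNumber:      aws.Int32(1),
    		CopySource:      aws.String("amzn-s3-demo-bucket/reports/january.pdf"), // URL-encoded "bucket/key"
    		CopySourceRange: aws.String("bytes=0-5242879"),                         // first 5 MiB of the source object
    	})
    	if err != nil {
    		return err
    	}
    	fmt.Println("copied part ETag:", aws.ToString(out.CopyPartResult.ETag))
    	return nil
    }

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	if err := copyPart(context.Background(), s3.NewFromConfig(cfg), "example-upload-id"); err != nil {
    		log.Fatal(err)
    	}
    }

The conditional headers documented above (CopySourceIfMatch, CopySourceIfModifiedSince, and friends) are optional pointer fields on the same input struct and can be set the same way when the copy should be guarded by an ETag or timestamp check.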
@@ -398,6 +542,9 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -413,6 +560,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { return err } @@ -449,6 +608,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index e181ab7111..571931ad09 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -18,42 +18,54 @@ import ( "time" ) -// This operation is not supported by directory buckets. Passes transformed -// objects to a GetObject operation when using Object Lambda access points. For -// information about Object Lambda access points, see Transforming objects with -// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) -// in the Amazon S3 User Guide. This operation supports metadata that can be -// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and -// ErrorMessage . The GetObject response metadata is supported so that the -// WriteGetObjectResponse caller, typically an Lambda function, can provide the -// same metadata when it internally invokes GetObject . When WriteGetObjectResponse -// is called by a customer-owned Lambda function, the metadata returned to the end -// user GetObject call might differ from what Amazon S3 would normally return. You -// can include any number of metadata headers. When including a metadata header, it -// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header: -// MyCustomValue . The primary use case for this is to forward GetObject metadata. +// This operation is not supported for directory buckets. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the +// Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by [GetObject], in addition to +// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . 
The +// GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically an Lambda function, can provide the same metadata when it +// internally invokes GetObject . When WriteGetObjectResponse is called by a +// customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. +// +// You can include any number of metadata headers. When including a metadata +// header, it should be prefaced with x-amz-meta . For example, +// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to +// forward GetObject metadata. +// // Amazon Web Services provides some prebuilt Lambda functions that you can use // with S3 Object Lambda to detect and redact personally identifiable information // (PII) and decompress S3 objects. These Lambda functions are available in the // Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda access point. Example 1: PII Access Control - This Lambda function uses -// Amazon Comprehend, a natural language processing (NLP) service using machine -// learning to find insights and relationships in text. It automatically detects -// personally identifiable information (PII) such as names, addresses, dates, -// credit card numbers, and social security numbers from documents in your Amazon -// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon -// Comprehend, a natural language processing (NLP) service using machine learning -// to find insights and relationships in text. It automatically redacts personally +// Lambda access point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally // identifiable information (PII) such as names, addresses, dates, credit card // numbers, and social security numbers from documents in your Amazon S3 bucket. +// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally +// identifiable information (PII) such as names, addresses, dates, credit card +// numbers, and social security numbers from documents in your Amazon S3 bucket. +// // Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is // equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information -// on how to view and use these functions, see Using Amazon Web Services built -// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) -// in the Amazon S3 User Guide. +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 +// User Guide. 
+// +// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html +// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { if params == nil { params = &WriteGetObjectResponseInput{} @@ -88,7 +100,7 @@ type WriteGetObjectResponseInput struct { // The object data. Body io.Reader - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool @@ -96,47 +108,67 @@ type WriteGetObjectResponseInput struct { CacheControl *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. - ChecksumCRC32 *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32C checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. - ChecksumCRC32C *string - - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda // function. This may not match the checksum for the object stored in Amazon S3. // Amazon S3 will perform validation of the checksum values only when the original // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. 
If you supply multiple checksum headers, this request will fail. + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC32 *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC32C *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 160-bit SHA1 digest of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 256-bit SHA256 digest of the object returned by the Object Lambda + // function. 
This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Specifies presentational information for the object. @@ -160,7 +192,9 @@ type WriteGetObjectResponseInput struct { ContentType *string // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) - // a delete marker. + // a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An opaque identifier assigned by a web server to a specific version of a @@ -205,8 +239,9 @@ type WriteGetObjectResponseInput struct { ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) - // . + // more information about S3 Object Lock, see [Object Lock]. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when Object Lock is configured to expire. @@ -216,12 +251,18 @@ type WriteGetObjectResponseInput struct { PartsCount *int32 // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) - // . + // a Replication rule. For more information about S3 Replication, see [Replication]. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. For more information, see [Using Requester Pays buckets for storage transfers and usage]in the Amazon Simple Storage Service user + // guide. + // + // This functionality is not supported for directory buckets. + // + // [Using Requester Pays buckets for storage transfers and usage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html RequestCharged types.RequestCharged // Provides information about object restoration operation and expiration time of @@ -232,43 +273,59 @@ type WriteGetObjectResponseInput struct { // encryption key was specified for object stored in Amazon S3. SSECustomerAlgorithm *string - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see Protecting data using - // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) - // . + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to + // encrypt data stored in S3. 
For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]. + // + // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web + // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web // Services Key Management Service (Amazon Web Services KMS) symmetric encryption // customer managed key that was used for stored in Amazon S3 object. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing requested object in + // The server-side encryption algorithm used when storing requested object in // Amazon S3 (for example, AES256, aws:kms ). ServerSideEncryption types.ServerSideEncryption // The integer status code for an HTTP response of a corresponding GetObject // request. The following is a list of status codes. + // // - 200 - OK + // // - 206 - Partial Content + // // - 304 - Not Modified + // // - 400 - Bad Request + // // - 401 - Unauthorized + // // - 403 - Forbidden + // // - 404 - Not Found + // // - 405 - Method Not Allowed + // // - 409 - Conflict + // // - 411 - Length Required + // // - 412 - Precondition Failed + // // - 416 - Range Not Satisfiable + // // - 500 - Internal Server Error + // // - 503 - Service Unavailable StatusCode *int32 // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The number of tags, if any, on the object. 
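For context (again, not part of the vendored diff): WriteGetObjectResponse is normally called from the Lambda function that backs an Object Lambda access point, echoing back the RequestRoute and RequestToken delivered in the S3 Object Lambda event. A minimal sketch, with placeholder route, token, and body values:

    package main

    import (
    	"bytes"
    	"context"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // respondTransformed returns a transformed object body to the original
    // GetObject caller. In a real handler, route and token come from the
    // Object Lambda event; the values passed in main below are placeholders.
    func respondTransformed(ctx context.Context, client *s3.Client, route, token string, body []byte) error {
    	_, err := client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
    		RequestRoute: aws.String(route), // host prefix routing value from the event
    		RequestToken: aws.String(token), // opaque token from the event
    		StatusCode:   aws.Int32(200),    // status to surface to the GetObject caller
    		Body:         bytes.NewReader(body),
    	})
    	return err
    }

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := s3.NewFromConfig(cfg)
    	err = respondTransformed(context.Background(), client,
    		"example-request-route", // placeholder; supplied by the Object Lambda event
    		"example-request-token", // placeholder; supplied by the Object Lambda event
    		[]byte("transformed object contents"))
    	if err != nil {
    		log.Fatal(err)
    	}
    }

Only one of the Checksum* fields documented above may be supplied on a given call; the SDK rejects requests that carry multiple checksum headers, as the generated documentation notes.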
@@ -338,6 +395,9 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -353,6 +413,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { return err } @@ -389,6 +461,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go index 6ef631bd38..6faedcc0b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -8,16 +8,18 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } -func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(input, options) +func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(ctx, input, options) } type setLegacyContextSigningOptionsMiddleware struct { @@ -98,13 +100,13 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthEndpointParams(params, input, options) - bindAuthParamsRegion(params, input, options) + bindAuthEndpointParams(ctx, params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -179,7 +181,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := 
bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -191,6 +196,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -250,7 +258,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -260,12 +271,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -281,6 +300,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -290,6 +310,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -310,9 +333,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go index 2be5df30ff..f1e5ac029b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -20,13 +20,23 @@ import ( 
"github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestxml_deserializeOpAbortMultipartUpload struct { } @@ -42,6 +52,10 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -58,6 +72,7 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -132,6 +147,10 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -176,6 +195,7 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -321,6 +341,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -347,6 +380,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -415,6 +461,10 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -459,6 +509,7 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte } } + span.End() return out, metadata, err } @@ -623,6 +674,10 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := 
tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -639,6 +694,7 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -701,6 +757,86 @@ func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutpu return nil } +type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata) + } + output := &CreateBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpCreateMultipartUpload struct { } @@ -716,6 +852,10 @@ func (m 
*awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -760,6 +900,7 @@ func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -836,6 +977,11 @@ func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMu v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.RequestCharged = types.RequestCharged(headerValues[0]) @@ -958,6 +1104,10 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -969,6 +1119,11 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co output := &CreateSessionOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -997,6 +1152,7 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -1043,6 +1199,37 @@ func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, m } } +func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1100,6 +1287,10 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1117,6 +1308,7 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -1175,6 +1367,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1192,6 +1388,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1250,6 +1447,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1267,6 +1468,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -1325,6 +1527,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1342,6 +1548,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -1400,6 +1607,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1417,6 +1628,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha } } + span.End() return out, metadata, err } @@ -1475,6 +1687,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) 
HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1492,6 +1708,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1550,6 +1767,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1567,6 +1788,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -1610,6 +1832,86 @@ func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Res } } +type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata) + } + output := &DeleteBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; 
len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { } @@ -1625,6 +1927,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1642,6 +1948,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -1700,6 +2007,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1717,6 +2028,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ } } + span.End() return out, metadata, err } @@ -1775,6 +2087,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1792,6 +2108,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -1850,6 +2167,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1867,6 +2188,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -1925,6 +2247,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1942,6 +2268,7 @@ func (m 
*awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2000,6 +2327,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2017,6 +2348,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2075,6 +2407,10 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2091,6 +2427,7 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2176,6 +2513,10 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2220,6 +2561,7 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -2338,6 +2680,10 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2354,6 +2700,7 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx cont return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2425,6 +2772,10 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2442,6 +2793,7 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -2500,6 
+2852,10 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2544,6 +2900,7 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -2663,6 +3020,10 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2702,6 +3063,7 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -2808,6 +3170,10 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2847,6 +3213,7 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -2947,6 +3314,10 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2986,6 +3357,7 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -3086,6 +3458,10 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3125,6 +3501,7 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -3225,6 +3602,10 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3264,6 +3645,7 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -3364,6 +3746,10 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3403,6 +3789,7 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3503,6 +3890,10 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3514,6 +3905,11 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial output := &GetBucketLifecycleConfigurationOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -3542,6 +3938,7 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3585,6 +3982,18 @@ func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smit } } +func awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3642,6 +4051,10 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3681,6 +4094,7 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex } } + span.End() return out, metadata, 
err } @@ -3788,6 +4202,10 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3827,6 +4245,7 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -3912,6 +4331,150 @@ func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLogging return nil } +type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata) + } + output := &GetBucketMetadataTableConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, 
+ }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v **GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketMetadataTableConfigurationOutput + if *v == nil { + sv = &GetBucketMetadataTableConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { } @@ -3927,6 +4490,10 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3966,6 +4533,7 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } @@ -4066,6 +4634,10 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4105,6 +4677,7 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -4223,6 +4796,10 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ 
-4262,6 +4839,7 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -4362,6 +4940,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4378,6 +4960,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -4457,6 +5040,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4496,6 +5083,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -4596,6 +5184,10 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4635,6 +5227,7 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -4735,6 +5328,10 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4774,6 +5371,7 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -4881,6 +5479,10 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4920,6 +5522,7 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5020,6 +5623,10 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err 
} + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5059,6 +5666,7 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -5179,6 +5787,10 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5218,6 +5830,7 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5336,6 +5949,10 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5357,6 +5974,7 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -5440,6 +6058,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -5450,6 +6073,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -5504,12 +6132,17 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -5652,6 +6285,10 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5696,6 +6333,7 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -5817,6 +6455,10 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5861,6 +6503,7 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -6048,6 +6691,10 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6087,6 +6734,7 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6187,6 +6835,10 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6226,6 +6878,7 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -6326,6 +6979,10 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6365,6 +7022,7 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6465,6 +7123,10 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := 
startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6509,6 +7171,7 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -6621,6 +7284,10 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6642,6 +7309,7 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -6720,6 +7388,10 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6759,6 +7431,7 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -6859,6 +7532,10 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6875,6 +7552,7 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -6968,6 +7646,10 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6984,6 +7666,7 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -7069,6 +7752,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := 
response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -7079,6 +7767,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -7103,6 +7796,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ContentLength = ptr.Int64(vv) } + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentType = ptr.String(headerValues[0]) @@ -7128,12 +7826,17 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -7260,6 +7963,10 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7299,6 +8006,7 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7441,6 +8149,10 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7480,6 +8192,7 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han } } + span.End() return out, metadata, err } @@ -7622,6 +8335,10 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri return out, 
metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7661,6 +8378,7 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7803,6 +8521,10 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7842,6 +8564,7 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial } } + span.End() return out, metadata, err } @@ -7984,6 +8707,10 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8023,6 +8750,7 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8094,12 +8822,38 @@ func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, de return err } + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + case strings.EqualFold("Owner", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { return err } + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -8129,6 +8883,10 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8168,6 +8926,7 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8281,6 +9040,10 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + 
defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8325,6 +9088,7 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8580,6 +9344,10 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8624,6 +9392,7 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8856,6 +9625,10 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8900,6 +9673,7 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -9162,6 +9936,10 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9206,6 +9984,7 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -9467,6 +10246,10 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9511,6 +10294,7 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex } } + span.End() return out, metadata, err } @@ -9628,6 +10412,19 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiator", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { @@ -9773,6 +10570,10 @@ func (m 
*awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9790,6 +10591,7 @@ func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -9848,6 +10650,10 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9865,6 +10671,7 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -9923,6 +10730,10 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9940,6 +10751,7 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -9998,6 +10810,10 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10015,6 +10831,7 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -10073,6 +10890,10 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10090,6 +10911,7 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -10148,6 +10970,10 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10165,6 +10991,7 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -10223,6 +11050,10 @@ func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10240,6 +11071,7 @@ func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -10298,6 +11130,10 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10309,12 +11145,12 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial output := &PutBucketLifecycleConfigurationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } + err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -10358,6 +11194,19 @@ func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smit } } +func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} + type awsRestxml_deserializeOpPutBucketLogging struct { } @@ -10373,6 +11222,10 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10390,6 +11243,7 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10448,6 +11302,10 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, 
"OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10465,6 +11323,7 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } @@ -10523,6 +11382,10 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10540,6 +11403,7 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -10598,6 +11462,10 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10615,6 +11483,7 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -10673,6 +11542,10 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10690,6 +11563,7 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. 
} } + span.End() return out, metadata, err } @@ -10748,6 +11622,10 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10765,6 +11643,7 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -10823,6 +11702,10 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10840,6 +11723,7 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -10898,6 +11782,10 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10915,6 +11803,7 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10973,6 +11862,10 @@ func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10990,6 +11883,7 @@ func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -11048,6 +11942,10 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11065,6 +11963,7 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -11123,6 +12022,10 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11139,6 +12042,7 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11172,6 +12076,18 @@ func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metad } errorBody.Seek(0, io.SeekStart) switch { + case strings.EqualFold("EncryptionTypeMismatch", errorCode): + return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody) + + case strings.EqualFold("InvalidRequest", errorCode): + return awsRestxml_deserializeErrorInvalidRequest(response, errorBody) + + case strings.EqualFold("InvalidWriteOffset", errorCode): + return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody) + + case strings.EqualFold("TooManyParts", errorCode): + return awsRestxml_deserializeErrorTooManyParts(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -11206,6 +12122,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -11216,6 +12137,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ETag = ptr.String(headerValues[0]) @@ -11236,6 +12162,15 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.Size = ptr.Int64(vv) + } + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.SSECustomerAlgorithm = ptr.String(headerValues[0]) @@ -11279,6 +12214,10 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11295,6 +12234,7 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, 
metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11369,6 +12309,10 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11385,6 +12329,7 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11456,6 +12401,10 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11472,6 +12421,7 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11543,6 +12493,10 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11559,6 +12513,7 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11630,6 +12585,10 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11646,6 +12605,7 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11717,6 +12677,10 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, 
ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11734,6 +12698,7 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -11792,6 +12757,10 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11808,6 +12777,7 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11887,6 +12857,10 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11898,6 +12872,7 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont output := &SelectObjectContentOutput{} out.Result = output + span.End() return out, metadata, err } @@ -11956,6 +12931,10 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11972,6 +12951,7 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -12039,6 +13019,11 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -12097,6 +13082,10 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} @@ -12141,6 +13130,7 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C } } + span.End() return out, metadata, err } @@ -12287,6 +13277,10 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -12304,6 +13298,7 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -12865,6 +13860,11 @@ func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Res return output } +func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.EncryptionTypeMismatch{} + return output +} + func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidObjectState{} var buff [1024]byte @@ -12898,6 +13898,16 @@ func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response return output } +func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequest{} + return output +} + +func awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidWriteOffset{} + return output +} + func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.NoSuchBucket{} return output @@ -12928,6 +13938,11 @@ func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp. 
return output } +func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyParts{} + return output +} + func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13677,6 +14692,19 @@ func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.No originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("BucketRegion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BucketRegion = ptr.String(xtv) + } + case strings.EqualFold("CreationDate", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13909,6 +14937,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13935,6 +14976,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -14256,6 +15310,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14282,6 +15349,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14374,6 +15454,19 @@ func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, deco sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -15210,6 +16303,42 @@ func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionC return nil } +func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + var sv *types.EncryptionTypeMismatch + if *v == nil { + sv = &types.EncryptionTypeMismatch{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15298,6 +16427,68 @@ func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.Node return nil } +func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDetails + if *v == nil { + sv = &types.ErrorDetails{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorCode = ptr.String(xtv) + } + + case strings.EqualFold("ErrorMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15790,6 +16981,67 @@ func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule *v = sv return nil } +func awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(v **types.GetBucketMetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetBucketMetadataTableConfigurationResult + if *v == nil { + sv = &types.GetBucketMetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("MetadataTableConfigurationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetadataTableConfigurationResult(&sv.MetadataTableConfigurationResult, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status 
= ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -16565,6 +17817,78 @@ func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectSta return nil } +func awsRestxml_deserializeDocumentInvalidRequest(v **types.InvalidRequest, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidRequest + if *v == nil { + sv = &types.InvalidRequest{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidWriteOffset(v **types.InvalidWriteOffset, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidWriteOffset + if *v == nil { + sv = &types.InvalidWriteOffset{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -17525,12 +18849,17 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR return nil } -func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v **types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.LifecycleRuleFilter - var memberFound bool + var sv *types.LifecycleRuleFilter + if *v == nil { + sv = &types.LifecycleRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -17539,27 +18868,16 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv types.LifecycleRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = 
&types.LifecycleRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17573,13 +18891,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeGreaterThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17593,13 +18908,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeLessThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string val, err := decoder.Value() if err != nil { return err @@ -17609,30 +18921,26 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.LifecycleRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -17778,6 +19086,48 @@ func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, deco return nil } +func awsRestxml_deserializeDocumentMetadataTableConfigurationResult(v **types.MetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetadataTableConfigurationResult + if *v == nil { + sv = &types.MetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3TablesDestinationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3TablesDestinationResult(&sv.S3TablesDestinationResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -18146,6 +19496,19 @@ func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, de sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err 
+ } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiated", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -18731,6 +20094,19 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19230,6 +20606,19 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19332,6 +20721,19 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19797,6 +21199,19 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -20824,12 +22239,17 @@ func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.Replicat return nil } -func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.ReplicationRuleFilter - var memberFound bool + var sv *types.ReplicationRuleFilter + if *v == nil { + sv = &types.ReplicationRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -20838,27 +22258,16 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv types.ReplicationRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string 
val, err := decoder.Value() if err != nil { return err @@ -20868,30 +22277,26 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -21298,6 +22703,94 @@ func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder sm return nil } +func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.S3TablesDestinationResult + if *v == nil { + sv = &types.S3TablesDestinationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TableArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableArn = ptr.String(xtv) + } + + case strings.EqualFold("TableBucketArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableBucketArn = ptr.String(xtv) + } + + case strings.EqualFold("TableName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableName = ptr.String(xtv) + } + + case strings.EqualFold("TableNamespace", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableNamespace = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -22370,6 +23863,42 @@ func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, deco *v = sv return nil } +func awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TooManyParts + if *v == nil { + sv = &types.TooManyParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + 
default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index e6ae2e8729..9734372c4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -20,6 +20,7 @@ import ( "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -326,6 +327,13 @@ type EndpointParameters struct { // is required. Prefix *string + // The Copy Source used for Copy Object request. This is an optional parameter that + // will be set automatically for operations that are scoped to Copy + // Source. + // + // Parameter is required. + CopySource *string + // Internal parameter to disable Access Point Buckets // // Parameter is required. @@ -419,6 +427,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -696,15 +715,61 @@ func (r *resolver) ResolveEndpoint( _UseS3ExpressControlEndpoint := *exprVal _ = _UseS3ExpressControlEndpoint if _UseS3ExpressControlEndpoint == true { - _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) - _ = _uri_encoded_bucket - if !(params.Endpoint != nil) { - if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if !(params.Endpoint != nil) { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } uriString := func() string { var out strings.Builder - out.WriteString("https://s3express-control-fips.") + out.WriteString("https://s3express-control.") out.WriteString(_Region) - 
out.WriteString(".amazonaws.com/") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") out.WriteString(_uri_encoded_bucket) return out.String() }() @@ -739,12 +804,2190 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + 
if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() 
smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + 
out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + 
uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + 
smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return 
smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { + _accessPointSuffix := *exprVal + _ = _accessPointSuffix + if _accessPointSuffix == "--xa-s3" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket uriString := func() string { var out strings.Builder - 
out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com/") + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: 
func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return 
smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString 
:= func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, 
_Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + 
smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 16, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 16, 18, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + 
}() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 21, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 21, 23, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + 
out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 7, 27, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 27, 29, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("Endpoint resolution 
failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if !(params.Bucket != nil) { + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) return out.String() }() @@ -778,416 +3021,14 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") } - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties 
- smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return 
out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() 
- - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "sigv4-s3express", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") - } - } - } - if !(params.Bucket != nil) { - if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { - _UseS3ExpressControlEndpoint := *exprVal - _ = _UseS3ExpressControlEndpoint - if _UseS3ExpressControlEndpoint == true { - if exprVal := params.Endpoint; exprVal != nil { - _Endpoint := *exprVal - _ = _Endpoint - if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { - _url := *exprVal - _ = _url + if _UseFIPS == true { uriString := func() string { var out strings.Builder - out.WriteString(_url.Scheme) - out.WriteString("://") - out.WriteString(_url.Authority) - out.WriteString(_url.Path) + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1221,13 +3062,12 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - } - if _UseFIPS == true { uriString := func() string { var out strings.Builder - out.WriteString("https://s3express-control-fips.") + out.WriteString("https://s3express-control.") out.WriteString(_Region) - out.WriteString(".amazonaws.com") + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) return out.String() }() @@ -1261,43 +3101,7 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") } } } @@ -3736,8 +5540,8 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") } if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_275 := *exprVal - _ = _var_275 + _var_321 := *exprVal + _ = _var_321 return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") } if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { @@ -3980,8 +5784,8 @@ func (r *resolver) ResolveEndpoint( } if _ForcePathStyle == true { if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_288 := *exprVal - _ = _var_288 + _var_334 := *exprVal + _ = _var_334 return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") } } @@ -5750,7 +7554,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -5782,6 +7586,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -5795,12 +7602,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -5822,8 +7634,11 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + ctx = setS3ResolvedURI(ctx, endpt.URI.String()) + backend := s3cust.GetPropertiesBackend(&endpt.Properties) ctx = internalcontext.SetS3Backend(ctx, backend) + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go new file mode 100644 index 0000000000..a9b54535bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "context" + "strings" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// isExpressUserAgent tracks whether the caller is using S3 Express +// +// we can only derive this at runtime, so the middleware needs to hold a handle +// to the underlying 
user-agent manipulator to set the feature flag as +// necessary +type isExpressUserAgent struct { + ua *awsmiddleware.RequestUserAgent +} + +func (*isExpressUserAgent) ID() string { + return "isExpressUserAgent" +} + +func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + const expressSuffix = "--x-s3" + + bucket, ok := bucketFromInput(in.Parameters) + if ok && strings.HasSuffix(bucket, expressSuffix) { + m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) + } + return next.HandleSerialize(ctx, in) +} + +func addIsExpressUserAgent(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 6e392285ef..fa0119bcb3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -18,6 +18,7 @@ "api_op_CompleteMultipartUpload.go", "api_op_CopyObject.go", "api_op_CreateBucket.go", + "api_op_CreateBucketMetadataTableConfiguration.go", "api_op_CreateMultipartUpload.go", "api_op_CreateSession.go", "api_op_DeleteBucket.go", @@ -27,6 +28,7 @@ "api_op_DeleteBucketIntelligentTieringConfiguration.go", "api_op_DeleteBucketInventoryConfiguration.go", "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetadataTableConfiguration.go", "api_op_DeleteBucketMetricsConfiguration.go", "api_op_DeleteBucketOwnershipControls.go", "api_op_DeleteBucketPolicy.go", @@ -47,6 +49,7 @@ "api_op_GetBucketLifecycleConfiguration.go", "api_op_GetBucketLocation.go", "api_op_GetBucketLogging.go", + "api_op_GetBucketMetadataTableConfiguration.go", "api_op_GetBucketMetricsConfiguration.go", "api_op_GetBucketNotificationConfiguration.go", "api_op_GetBucketOwnershipControls.go", @@ -123,13 +126,14 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", "types/errors.go", "types/types.go", "types/types_exported_test.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/s3", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index 04c6fdbb3e..e2b5c6cfb3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.53.1" +const goModuleVersion = "1.80.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go index f3e6b0751d..38443a833b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go @@ -89,6 +89,7 @@ func New() *Resolver { var partitionRegexp = struct { Aws *regexp.Regexp AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp AwsIsoE *regexp.Regexp @@ -96,8 +97,9 @@ var partitionRegexp = struct { 
AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), @@ -252,6 +254,24 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com", + }, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ @@ -460,6 +480,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.me-south-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.mx-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "s3-external-1", }: endpoints.Endpoint{ @@ -641,6 +670,27 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, { ID: "aws-iso", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ @@ -773,19 +823,24 @@ var defaultPartitions = endpoints.Partitions{ Variant: endpoints.FIPSVariant, }: { Hostname: "s3-fips.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, { Variant: 0, }: { Hostname: "s3.{region}.cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, RegionRegex: partitionRegexp.AwsIsoE, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-iso-f", @@ -794,19 +849,27 @@ var defaultPartitions = endpoints.Partitions{ Variant: endpoints.FIPSVariant, }: { Hostname: "s3-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, { Variant: 0, }: { Hostname: "s3.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, RegionRegex: partitionRegexp.AwsIsoF, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: 
endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-us-gov", @@ -947,6 +1010,19 @@ func GetDNSSuffix(id string, options Options) (string, error) { } + case strings.EqualFold(id, "aws-eusc"): + switch variant { + case endpoints.FIPSVariant: + return "amazonaws.eu", nil + + case 0: + return "amazonaws.eu", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + case strings.EqualFold(id, "aws-iso"): switch variant { case endpoints.FIPSVariant: @@ -1034,6 +1110,9 @@ func GetDNSSuffixFromRegion(region string, options Options) (string, error) { case partitionRegexp.AwsCn.MatchString(region): return GetDNSSuffix("aws-cn", options) + case partitionRegexp.AwsEusc.MatchString(region): + return GetDNSSuffix("aws-eusc", options) + case partitionRegexp.AwsIso.MatchString(region): return GetDNSSuffix("aws-iso", options) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go index 064bcefb4f..6f29b807f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -12,7 +12,9 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,6 +52,10 @@ type Options struct { // clients initial default settings. DefaultsMode aws.DefaultsMode + // Disables logging when the client skips output checksum validation due to lack + // of algorithm support. + DisableLogOutputChecksumValidationSkipped bool + // Allows you to disable S3 Multi-Region access points feature. DisableMultiRegionAccessPoints bool @@ -65,8 +71,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -82,23 +90,35 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string + // Indicates how user opt-in/out request checksum calculation + RequestChecksumCalculation aws.RequestChecksumCalculation + + // Indicates how user opt-in/out response checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation + // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. 
If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -113,6 +133,9 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // Allows you to enable arn region support for the service. UseARNRegion bool @@ -141,8 +164,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -193,6 +217,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go new file mode 100644 index 0000000000..491ed2e5da --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go @@ -0,0 +1,419 @@ +package s3 + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const ( + algorithmHeader = "X-Amz-Algorithm" + credentialHeader = "X-Amz-Credential" + dateHeader = "X-Amz-Date" + tokenHeader = "X-Amz-Security-Token" + signatureHeader = "X-Amz-Signature" + + algorithm = "AWS4-HMAC-SHA256" + aws4Request = "aws4_request" + bucketHeader = "bucket" + defaultExpiresIn = 15 * time.Minute + shortDateLayout = "20060102" +) + +// PresignPostObject is a special kind of [presigned request] used to send a request using +// form data, likely from an HTML form on a browser. +// Unlike other presigned operations, the return values of this function are not meant to be used directly +// to make an HTTP request but rather to be used as inputs to a form. See [the docs] for more information +// on how to use these values +// +// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html +// [the docs] https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html +func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) { + if params == nil { + params = &PutObjectInput{} + } + clientOptions := c.options.copy() + options := PresignPostOptions{ + Expires: clientOptions.Expires, + PostPresigner: &postSignAdapter{}, + } + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption) + cvt := presignPostConverter(options) + result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns, + c.client.addOperationPutObjectMiddlewares, + cvt.ConvertToPresignMiddleware, + func(stack *middleware.Stack, options Options) error { + return awshttp.RemoveContentTypeHeader(stack) + }, + ) + if err != nil { + return nil, err + } + + out := result.(*PresignedPostRequest) + return out, nil +} + +// PresignedPostRequest represents a presigned request to be sent using HTTP verb POST and FormData +type PresignedPostRequest struct { + // Represents the Base URL to make a request to + URL string + // Values is a key-value map of values to be sent as FormData + // these values are not encoded + Values map[string]string +} + +// postSignAdapter adapter to implement the presignPost interface +type postSignAdapter struct{} + +// PresignPost creates a special kind of [presigned request] +// to be used with HTTP verb POST. 
+// It differs from PUT request mostly on +// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy doc to limit where an object can be posted to +// 2. The return value needs to have more processing since it's meant to be sent via a form and not stand on its own +// 3. There's no body to be signed, since that will be attached when the actual request is made +// 4. The signature is made based on the policy document, not the whole request +// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html +// +// [presigned request] https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html +func (s *postSignAdapter) PresignPost( + credentials aws.Credentials, + bucket string, key string, + region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions), +) (fields map[string]string, err error) { + credentialScope := buildCredentialScope(signingTime, region, service) + credentialStr := credentials.AccessKeyID + "/" + credentialScope + + policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions) + if err != nil { + return nil, err + } + + signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime) + + fields = getPostSignRequiredFields(signingTime, credentialStr, credentials) + fields[signatureHeader] = signature + fields["key"] = key + fields["policy"] = policyDoc + + return fields, nil +} + +func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string { + fields := map[string]string{ + algorithmHeader: algorithm, + dateHeader: t.UTC().Format("20060102T150405Z"), + credentialHeader: credentialStr, + } + + sessionToken := awsCredentials.SessionToken + if len(sessionToken) > 0 { + fields[tokenHeader] = sessionToken + } + + return fields +} + +// PresignPost defines the interface to presign a POST request +type PresignPost interface { + PresignPost( + credentials aws.Credentials, + bucket string, key string, + region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (fields map[string]string, err error) +} + +// PresignPostOptions represent the options to be passed to a PresignPost sign request +type PresignPostOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // PostPresigner to use. One will be created if none is provided + PostPresigner PresignPost + + // Expires sets the expiration duration for the generated presign url. This should + // be the duration in seconds the presigned URL should be considered valid for. If + // not set or set to zero, presign url would default to expire after 900 seconds. + Expires time.Duration + + // Conditions a list of extra conditions to pass to the policy document + // Available conditions can be found [here] + // + // [here]https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions + Conditions []interface{} +} + +type presignPostConverter PresignPostOptions + +// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware. 
+type presignPostRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Presigner PresignPost + LogSigning bool + ExpiresIn time.Duration + Conditions []interface{} +} + +type presignPostRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + presigner PresignPost + logSigning bool + expiresIn time.Duration + conditions []interface{} +} + +// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware +// initialized with the presigner. +func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware { + return &presignPostRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + presigner: options.Presigner, + logSigning: options.LogSigning, + expiresIn: options.ExpiresIn, + conditions: options.Conditions, + } +} + +// ID provides the middleware ID. +func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" } + +// HandleFinalize will take the provided input and create a presigned url for +// the http request using the SigV4 presign authentication scheme. +// +// Since the signed request is not a valid HTTP request +func (s *presignPostRequestMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + + input := getOperationInput(ctx) + asS3Put, ok := input.(*PutObjectInput) + if !ok { + return out, metadata, fmt.Errorf("expected PutObjectInput") + } + bucketName, ok := asS3Put.bucket() + if !ok { + return out, metadata, fmt.Errorf("requested input bucketName not found on request") + } + uploadKey := asS3Put.Key + if uploadKey == nil { + return out, metadata, fmt.Errorf("PutObject input does not have a key input") + } + + uri := getS3ResolvedURI(ctx) + + signingName := awsmiddleware.GetSigningName(ctx) + signingRegion := awsmiddleware.GetSigningRegion(ctx) + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &v4.SigningError{ + Err: fmt.Errorf("failed to retrieve credentials: %w", err), + } + } + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime := sdk.NowTime().Add(skew) + expirationTime := signingTime.Add(s.expiresIn).UTC() + + fields, err := s.presigner.PresignPost( + credentials, + bucketName, + *uploadKey, + signingRegion, + signingName, + signingTime, + s.conditions, + expirationTime, + func(o *v4.SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &v4.SigningError{ + Err: fmt.Errorf("failed to sign http request, %w", err), + } + } + + out.Result = &PresignedPostRequest{ + URL: uri, + Values: fields, + } + + return out, metadata, nil +} + +// Adapted from existing PresignConverter middleware +func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Build.Remove("UserAgent") + stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + stack.Finalize.Remove((*retry.Attempt)(nil).ID()) + stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) + stack.Deserialize.Clear() + + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + + // if no expiration is set, set one + expiresIn := c.Expires + if expiresIn == 0 { + expiresIn = defaultExpiresIn + } + + pmw := 
newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.PostPresigner, + LogSigning: options.ClientLogMode.IsSigning(), + ExpiresIn: expiresIn, + Conditions: c.Conditions, + }) + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) { + initialConditions := []interface{}{ + map[string]string{ + algorithmHeader: algorithm, + }, + map[string]string{ + bucketHeader: bucket, + }, + map[string]string{ + credentialHeader: credentialString, + }, + map[string]string{ + dateHeader: signingTime.UTC().Format("20060102T150405Z"), + }, + } + + var conditions []interface{} + for _, v := range initialConditions { + conditions = append(conditions, v) + } + + if securityToken != nil && *securityToken != "" { + conditions = append(conditions, map[string]string{ + tokenHeader: *securityToken, + }) + } + + // append user-defined conditions at the end + conditions = append(conditions, extraConditions...) + + // The policy allows you to set a "key" value to specify what's the name of the + // key to add. Customers can add one by specifying one in their conditions, + // so we're checking if one has already been set. + // If none is found, restrict this to just the key name passed on the request + // This can be disabled by adding a condition that explicitly allows + // everything + if !isAlreadyCheckingForKey(conditions) { + conditions = append(conditions, map[string]string{"key": key}) + } + + policyDoc := map[string]interface{}{ + "conditions": conditions, + "expiration": expirationTime.Format(time.RFC3339), + } + + jsonBytes, err := json.Marshal(policyDoc) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(jsonBytes), nil +} + +func isAlreadyCheckingForKey(conditions []interface{}) bool { + // Need to check for two conditions: + // 1. A condition of the form ["starts-with", "$key", "mykey"] + // 2. 
A condition of the form {"key": "mykey"} + for _, c := range conditions { + slice, ok := c.([]interface{}) + if ok && len(slice) > 1 { + if slice[0] == "starts-with" && slice[1] == "$key" { + return true + } + } + m, ok := c.(map[string]interface{}) + if ok && len(m) > 0 { + for k := range m { + if k == "key" { + return true + } + } + } + // Repeat this but for map[string]string due to type constrains + ms, ok := c.(map[string]string) + if ok && len(ms) > 0 { + for k := range ms { + if k == "key" { + return true + } + } + } + } + return false +} + +// these methods have been copied from v4 implementation since they are not exported for public use +func hmacsha256(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func buildSignature(strToSign, secret, service, region string, t time.Time) string { + key := deriveKey(secret, service, region, t) + return hex.EncodeToString(hmacsha256(key, []byte(strToSign))) +} + +func deriveKey(secret, service, region string, t time.Time) []byte { + hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout))) + hmacRegion := hmacsha256(hmacDate, []byte(region)) + hmacService := hmacsha256(hmacRegion, []byte(service)) + return hmacsha256(hmacService, []byte(aws4Request)) +} + +func buildCredentialScope(signingTime time.Time, region, service string) string { + return strings.Join([]string{ + signingTime.UTC().Format(shortDateLayout), + region, + service, + aws4Request, + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go index 59524bdcbd..f1136bb8e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -12,6 +12,7 @@ import ( smithyxml "github.com/aws/smithy-go/encoding/xml" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "strconv" @@ -28,6 +29,10 @@ func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string { func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -64,6 +69,8 @@ func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -71,11 +78,16 @@ func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipa return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatchInitiatedTime != nil { + locationName := "X-Amz-If-Match-Initiated-Time" + 
encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime)) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -107,6 +119,10 @@ func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -118,7 +134,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -167,6 +183,8 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -174,31 +192,51 @@ func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfNoneMatch != nil { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -208,22 +246,27 @@ func 
awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM } } + if v.MpuObjectSize != nil { + locationName := "X-Amz-Mp-Object-Size" + encoder.SetHeader(locationName).Long(*v.MpuObjectSize) + } + if len(v.RequestPayer) > 0 { locationName := "X-Amz-Request-Payer" encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -245,6 +288,10 @@ func (*awsRestxml_serializeOpCopyObject) ID() string { func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -281,6 +328,8 @@ func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { @@ -298,7 +347,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -308,32 +357,32 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" 
encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -343,7 +392,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -353,27 +402,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -383,22 +432,22 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -415,9 +464,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -451,27 +498,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if 
v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -481,7 +528,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } @@ -491,7 +538,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.TaggingDirective)) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -509,6 +556,10 @@ func (*awsRestxml_serializeOpCreateBucket) ID() string { func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -569,6 +620,8 @@ func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { @@ -581,27 +634,27 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" 
encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -619,6 +672,107 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e return nil } +type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MetadataTableConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetadataTableConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + type awsRestxml_serializeOpCreateMultipartUpload struct { } @@ -629,6 +783,10 @@ func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -640,7 +798,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -665,6 +823,8 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -682,7 +842,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -692,27 +852,32 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && 
len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -722,22 +887,22 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -754,9 +919,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -785,27 +948,27 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -815,12 +978,12 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -838,6 +1001,10 @@ func 
(*awsRestxml_serializeOpCreateSession) ID() string { func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -874,6 +1041,8 @@ func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { @@ -881,11 +1050,31 @@ func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.BucketKeyEnabled != nil { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + if len(v.SessionMode) > 0 { locationName := "X-Amz-Create-Session-Mode" encoder.SetHeader(locationName).String(string(v.SessionMode)) } + if v.SSEKMSEncryptionContext != nil { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + return nil } @@ -899,6 +1088,10 @@ func (*awsRestxml_serializeOpDeleteBucket) ID() string { func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -935,6 +1128,8 @@ func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { @@ -942,7 +1137,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -960,6 +1155,10 @@ func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -996,6 +1195,8 @@ func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1003,7 +1204,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1025,6 +1226,10 @@ func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1061,6 +1266,8 @@ func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -1068,7 +1275,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCors return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1086,6 +1293,10 @@ func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1122,6 +1333,8 @@ func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -1129,7 +1342,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBuck return 
fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1147,6 +1360,10 @@ func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() s func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1183,6 +1400,8 @@ func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) Hand } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1207,6 +1426,10 @@ func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1243,6 +1466,8 @@ func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1250,7 +1475,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1272,6 +1497,10 @@ func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1308,6 +1537,8 @@ func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx contex } in.Request = request + endTimer() 
+ span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { @@ -1315,7 +1546,74 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucke return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1333,6 +1631,10 @@ func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1369,6 +1671,8 @@ func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1376,7 +1680,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1398,6 +1702,10 @@ func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1434,6 +1742,8 @@ func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -1441,7 +1751,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *Del return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1459,6 +1769,10 @@ func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1495,6 +1809,8 @@ func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -1502,7 +1818,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPo return fmt.Errorf("unsupported serialization of 
nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1520,6 +1836,10 @@ func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1556,6 +1876,8 @@ func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -1563,7 +1885,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1581,6 +1903,10 @@ func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1617,6 +1943,8 @@ func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -1624,7 +1952,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1642,6 +1970,10 @@ func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string { func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1678,6 +2010,8 @@ func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -1685,7 +2019,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketW return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1703,6 +2037,10 @@ func (*awsRestxml_serializeOpDeleteObject) ID() string { func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1739,6 +2077,8 @@ func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error { @@ -1751,11 +2091,26 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfMatchLastModifiedTime != nil { + locationName := "X-Amz-If-Match-Last-Modified-Time" + 
encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime)) + } + + if v.IfMatchSize != nil { + locationName := "X-Amz-If-Match-Size" + encoder.SetHeader(locationName).Long(*v.IfMatchSize) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -1765,7 +2120,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e } } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -1792,6 +2147,10 @@ func (*awsRestxml_serializeOpDeleteObjects) ID() string { func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1803,7 +2162,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects") + opPath, opQuery := httpbinding.SplitURI("/?delete") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -1852,6 +2211,8 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error { @@ -1869,12 +2230,12 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -1897,6 +2258,10 @@ func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string { func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1933,6 +2298,8 @@ func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -1940,7 +2307,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1971,6 +2338,10 @@ func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2007,6 +2378,8 @@ func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -2014,7 +2387,7 @@ func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePub return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2032,6 +2405,10 @@ func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2068,6 +2445,8 @@ func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2075,7 +2454,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2098,6 +2477,10 @@ func (*awsRestxml_serializeOpGetBucketAcl) ID() string { func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2134,6 +2517,8 @@ func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { @@ -2141,7 +2526,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2159,6 +2544,10 @@ func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2195,6 +2584,8 @@ func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2202,7 +2593,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2224,6 +2615,10 @@ func (*awsRestxml_serializeOpGetBucketCors) ID() string { func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2260,6 +2655,8 @@ func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -2267,7 +2664,7 @@ func 
awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2285,6 +2682,10 @@ func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2321,6 +2722,8 @@ func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -2328,7 +2731,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2346,6 +2749,10 @@ func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() stri func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2382,6 +2789,8 @@ func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleS } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2406,6 +2815,10 @@ func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2442,6 +2855,8 @@ func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize( } 
in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2449,7 +2864,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2471,6 +2886,10 @@ func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2507,6 +2926,8 @@ func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2514,7 +2935,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2532,6 +2953,10 @@ func (*awsRestxml_serializeOpGetBucketLocation) ID() string { func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2568,6 +2993,8 @@ func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { @@ -2575,7 +3002,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocati return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2593,6 +3020,10 @@ func (*awsRestxml_serializeOpGetBucketLogging) ID() string { func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2629,6 +3060,8 @@ func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error { @@ -2636,7 +3069,74 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLogging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return 
fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2654,6 +3154,10 @@ func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2690,6 +3194,8 @@ func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2697,7 +3203,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *Get return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2719,6 +3225,10 @@ func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2755,6 +3265,8 @@ func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2762,7 +3274,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2780,6 +3292,10 @@ func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2816,6 +3332,8 @@ func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -2823,7 +3341,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2841,6 +3359,10 @@ func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2877,6 +3399,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -2884,7 +3408,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyIn return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2902,6 +3426,10 @@ func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2938,6 +3466,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { @@ -2945,7 +3475,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPo return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2963,6 +3493,10 @@ func (*awsRestxml_serializeOpGetBucketReplication) ID() string { func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2999,6 +3533,8 @@ func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -3006,7 +3542,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketRep return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3024,6 +3560,10 @@ func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3060,6 +3600,8 @@ func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { @@ -3067,7 +3609,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucket return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3085,6 +3627,10 @@ func (*awsRestxml_serializeOpGetBucketTagging) ID() string { func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3121,6 +3667,8 @@ func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Con 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -3128,7 +3676,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3146,6 +3694,10 @@ func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3182,6 +3734,8 @@ func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { @@ -3189,7 +3743,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVers return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3207,6 +3761,10 @@ func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3243,6 +3801,8 @@ func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -3250,7 +3810,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsite return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3268,6 +3828,10 @@ func (*awsRestxml_serializeOpGetObject) ID() string { func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3304,6 +3868,8 @@ func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error { @@ -3316,12 +3882,12 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ChecksumMode)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.IfMatch != nil && len(*v.IfMatch) > 0 { + if v.IfMatch != nil { locationName := "If-Match" encoder.SetHeader(locationName).String(*v.IfMatch) } @@ -3331,7 +3897,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) } - if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + if v.IfNoneMatch != nil { locationName := "If-None-Match" encoder.SetHeader(locationName).String(*v.IfNoneMatch) } @@ -3354,7 +3920,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetQuery("partNumber").Integer(*v.PartNumber) } - if v.Range != nil && len(*v.Range) > 0 { + if v.Range != nil { locationName := "Range" encoder.SetHeader(locationName).String(*v.Range) } @@ -3388,17 +3954,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -3420,6 +3986,10 @@ func (*awsRestxml_serializeOpGetObjectAcl) ID() string { func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3456,6 +4026,8 @@ func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context } in.Request = 
request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error { @@ -3463,7 +4035,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3499,6 +4071,10 @@ func (*awsRestxml_serializeOpGetObjectAttributes) ID() string { func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3535,6 +4111,8 @@ func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error { @@ -3542,7 +4120,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3563,6 +4141,9 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr if v.ObjectAttributes != nil { locationName := "X-Amz-Object-Attributes" + if len(v.ObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.ObjectAttributes { if len(v.ObjectAttributes[i]) > 0 { escaped := string(v.ObjectAttributes[i]) @@ -3575,7 +4156,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr } } - if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 { + if v.PartNumberMarker != nil { locationName := "X-Amz-Part-Number-Marker" encoder.SetHeader(locationName).String(*v.PartNumberMarker) } @@ -3585,17 +4166,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -3617,6 +4198,10 @@ func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string { 
func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3653,6 +4238,8 @@ func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error { @@ -3660,7 +4247,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegal return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3696,6 +4283,10 @@ func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string { func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3732,6 +4323,8 @@ func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { @@ -3739,7 +4332,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObj return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3757,6 +4350,10 @@ func (*awsRestxml_serializeOpGetObjectRetention) ID() string { func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3793,6 +4390,8 @@ func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error { @@ -3800,7 +4399,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectReten return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3836,6 +4435,10 @@ func (*awsRestxml_serializeOpGetObjectTagging) ID() string { func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3872,6 +4475,8 @@ func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -3879,7 +4484,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3915,6 +4520,10 @@ func (*awsRestxml_serializeOpGetObjectTorrent) ID() string { func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3951,6 +4560,8 @@ func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error { @@ -3958,7 +4569,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrent return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3990,6 +4601,10 @@ func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4026,6 +4641,8 @@ func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -4033,7 +4650,7 @@ func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAcc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4051,6 +4668,10 @@ func (*awsRestxml_serializeOpHeadBucket) ID() string { func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4087,6 +4708,8 @@ func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error { @@ -4094,7 +4717,7 @@ func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encod return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4112,6 +4735,10 @@ func (*awsRestxml_serializeOpHeadObject) ID() string { func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4148,6 +4775,8 @@ func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error { @@ -4160,12 +4789,12 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumMode)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if 
v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.IfMatch != nil && len(*v.IfMatch) > 0 { + if v.IfMatch != nil { locationName := "If-Match" encoder.SetHeader(locationName).String(*v.IfMatch) } @@ -4175,7 +4804,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince)) } - if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 { + if v.IfNoneMatch != nil { locationName := "If-None-Match" encoder.SetHeader(locationName).String(*v.IfNoneMatch) } @@ -4198,7 +4827,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetQuery("partNumber").Integer(*v.PartNumber) } - if v.Range != nil && len(*v.Range) > 0 { + if v.Range != nil { locationName := "Range" encoder.SetHeader(locationName).String(*v.Range) } @@ -4208,17 +4837,41 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -4240,6 +4893,10 @@ func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4276,6 +4933,8 @@ func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v 
*ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4287,7 +4946,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4305,6 +4964,10 @@ func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() st func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4341,6 +5004,8 @@ func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) Handl } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4365,6 +5030,10 @@ func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4401,6 +5070,8 @@ func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerializ } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4412,7 +5083,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4430,6 +5101,10 @@ func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string { func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4466,6 +5141,8 @@ func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error { @@ -4477,7 +5154,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *L encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4495,6 +5172,10 @@ func (*awsRestxml_serializeOpListBuckets) ID() string { func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4522,11 +5203,17 @@ func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: err} } + if err := awsRestxml_serializeOpHttpBindingsListBucketsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + if request.Request, err = restEncoder.Encode(request.Request); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error { @@ -4534,6 +5221,22 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.BucketRegion != nil { + encoder.SetQuery("bucket-region").String(*v.BucketRegion) + } + + if v.ContinuationToken != nil { + encoder.SetQuery("continuation-token").String(*v.ContinuationToken) + } + + if v.MaxBuckets != nil { + encoder.SetQuery("max-buckets").Integer(*v.MaxBuckets) + } + + if v.Prefix != nil { + encoder.SetQuery("prefix").String(*v.Prefix) + } + return nil } @@ -4547,6 +5250,10 @@ func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4583,6 +5290,8 @@ func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error { @@ -4611,6 +5320,10 @@ func (*awsRestxml_serializeOpListMultipartUploads) ID() string { func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4647,6 +5360,8 @@ func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error { @@ -4662,7 +5377,7 @@ func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipar encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4701,6 +5416,10 @@ func (*awsRestxml_serializeOpListObjects) ID() string { func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4737,6 +5456,8 @@ func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error { @@ -4752,7 +5473,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4767,6 +5488,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -4801,6 +5525,10 @@ func (*awsRestxml_serializeOpListObjectsV2) ID() string { func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, 
) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4837,6 +5565,8 @@ func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error { @@ -4856,7 +5586,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4871,6 +5601,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -4909,6 +5642,10 @@ func (*awsRestxml_serializeOpListObjectVersions) ID() string { func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -4945,6 +5682,8 @@ func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error { @@ -4960,7 +5699,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers encoder.SetQuery("encoding-type").String(string(v.EncodingType)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -4975,6 +5714,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers if v.OptionalObjectAttributes != nil { locationName := "X-Amz-Optional-Object-Attributes" + if len(v.OptionalObjectAttributes) == 0 { + encoder.AddHeader(locationName).String("") + } for i := range v.OptionalObjectAttributes { if len(v.OptionalObjectAttributes[i]) > 0 { escaped := string(v.OptionalObjectAttributes[i]) @@ -5013,6 +5755,10 @@ func (*awsRestxml_serializeOpListParts) ID() string { func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5049,6 +5795,8 @@ func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error { @@ -5056,7 +5804,7 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5083,17 +5831,17 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -5115,6 +5863,10 @@ func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5175,6 +5927,8 @@ func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5187,7 +5941,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v * encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5205,6 +5959,10 @@ func (*awsRestxml_serializeOpPutBucketAcl) ID() string { func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5265,6 +6023,8 @@ func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error { @@ -5282,37 +6042,37 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, e encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -5330,6 +6090,10 @@ func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5390,6 +6154,8 @@ func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5397,7 +6163,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5419,6 +6185,10 @@ func (*awsRestxml_serializeOpPutBucketCors) ID() string { func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5479,6 +6249,8 @@ func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -5491,12 +6263,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5514,6 +6286,10 @@ func (*awsRestxml_serializeOpPutBucketEncryption) ID() string { func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5574,6 +6350,8 @@ func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -5586,12 +6364,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncr encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5609,6 +6387,10 @@ func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() stri func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5669,6 +6451,8 @@ func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleS } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5693,6 +6477,10 @@ func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5753,6 +6541,8 @@ func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5760,7 +6550,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5782,6 +6572,10 @@ func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5842,6 +6636,8 @@ func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { @@ -5854,11 +6650,16 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *P encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if len(v.TransitionDefaultMinimumObjectSize) > 0 { + locationName := "X-Amz-Transition-Default-Minimum-Object-Size" + encoder.SetHeader(locationName).String(string(v.TransitionDefaultMinimumObjectSize)) + } + return nil } @@ -5872,6 +6673,10 @@ func (*awsRestxml_serializeOpPutBucketLogging) ID() string { func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -5932,6 +6737,8 @@ func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error { @@ -5944,12 +6751,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLogging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -5967,6 +6774,10 @@ func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6027,6 +6838,8 @@ 
func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -6034,7 +6847,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *Put return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6056,6 +6869,10 @@ func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string { func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6116,6 +6933,8 @@ func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { @@ -6123,7 +6942,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6146,6 +6965,10 @@ func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6206,6 +7029,8 @@ func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -6213,12 +7038,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil { locationName := 
"Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6236,6 +7066,10 @@ func (*awsRestxml_serializeOpPutBucketPolicy) ID() string { func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6284,6 +7118,8 @@ func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -6301,12 +7137,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyIn encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6324,6 +7160,10 @@ func (*awsRestxml_serializeOpPutBucketReplication) ID() string { func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6384,6 +7224,8 @@ func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -6396,17 +7238,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketRep encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.Token != nil && len(*v.Token) > 0 { + if v.Token != nil { locationName := "X-Amz-Bucket-Object-Lock-Token" encoder.SetHeader(locationName).String(*v.Token) } @@ -6424,6 +7266,10 @@ func 
(*awsRestxml_serializeOpPutBucketRequestPayment) ID() string { func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6484,6 +7330,8 @@ func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { @@ -6496,12 +7344,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucket encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6519,6 +7367,10 @@ func (*awsRestxml_serializeOpPutBucketTagging) ID() string { func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6579,6 +7431,8 @@ func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -6591,12 +7445,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTagging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6614,6 +7468,10 @@ func (*awsRestxml_serializeOpPutBucketVersioning) ID() string { func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6674,6 +7532,8 @@ func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error { @@ -6686,17 +7546,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVers encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.MFA != nil && len(*v.MFA) > 0 { + if v.MFA != nil { locationName := "X-Amz-Mfa" encoder.SetHeader(locationName).String(*v.MFA) } @@ -6714,6 +7574,10 @@ func (*awsRestxml_serializeOpPutBucketWebsite) ID() string { func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6774,6 +7638,8 @@ func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -6786,12 +7652,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsite encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6809,6 +7675,10 @@ func (*awsRestxml_serializeOpPutObject) ID() string { func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -6857,6 +7727,8 @@ func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, i } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func 
awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error { @@ -6874,7 +7746,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -6884,37 +7756,42 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } @@ -6924,17 +7801,17 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -6944,26 +7821,36 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" 
encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfNoneMatch != nil { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -6976,9 +7863,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -7007,27 +7892,27 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -7037,16 +7922,21 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } + if v.WriteOffsetBytes != nil { + locationName := "X-Amz-Write-Offset-Bytes" + encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes) + } + return nil } @@ -7060,6 +7950,10 @@ func (*awsRestxml_serializeOpPutObjectAcl) ID() string { func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7120,6 +8014,8 @@ func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error { @@ -7137,37 +8033,37 @@ func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, e encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -7203,6 +8099,10 @@ func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string { func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7263,6 +8163,8 @@ func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error { @@ -7275,12 +8177,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegal encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7316,6 +8218,10 @@ func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string { func (m *awsRestxml_serializeOpPutObjectLockConfiguration) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7376,6 +8282,8 @@ func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { @@ -7388,12 +8296,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7403,7 +8311,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.Token != nil && len(*v.Token) > 0 { + if v.Token != nil { locationName := "X-Amz-Bucket-Object-Lock-Token" encoder.SetHeader(locationName).String(*v.Token) } @@ -7421,6 +8329,10 @@ func (*awsRestxml_serializeOpPutObjectRetention) ID() string { func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7481,6 +8393,8 @@ func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { @@ -7498,12 +8412,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectReten encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7539,6 +8453,10 @@ func (*awsRestxml_serializeOpPutObjectTagging) ID() string { func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7599,6 +8517,8 @@ func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -7611,12 +8531,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTagging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7652,6 +8572,10 @@ func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7712,6 +8636,8 @@ func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -7724,12 +8650,12 @@ func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAcc encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7747,6 +8673,10 @@ func (*awsRestxml_serializeOpRestoreObject) ID() string { func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7758,7 +8688,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7807,6 +8737,8 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { @@ -7819,7 +8751,7 @@ func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7855,6 +8787,10 @@ func (*awsRestxml_serializeOpSelectObjectContent) ID() string { func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7866,7 +8802,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7909,6 +8845,8 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { @@ -7916,7 +8854,7 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7930,17 +8868,17 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC } } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8037,6 +8975,10 @@ func (*awsRestxml_serializeOpUploadPart) ID() string { func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8085,6 +9027,8 @@ func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { @@ -8097,22 +9041,27 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } @@ -8122,12 +9071,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod 
encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -8150,17 +9099,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8182,6 +9131,10 @@ func (*awsRestxml_serializeOpUploadPartCopy) ID() string { func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8218,6 +9171,8 @@ func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { @@ -8225,12 +9180,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -8240,7 +9195,7 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -8250,32 +9205,32 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + if v.CopySourceRange != nil { locationName := 
"X-Amz-Copy-Source-Range" encoder.SetHeader(locationName).String(*v.CopySourceRange) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -8298,17 +9253,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8330,6 +9285,10 @@ func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8341,7 +9300,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -8378,6 +9337,8 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) 
HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { @@ -8385,7 +9346,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 { + if v.AcceptRanges != nil { locationName := "X-Amz-Fwd-Header-Accept-Ranges" encoder.SetHeader(locationName).String(*v.AcceptRanges) } @@ -8395,42 +9356,47 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "X-Amz-Fwd-Header-Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "X-Amz-Fwd-Header-Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "X-Amz-Fwd-Header-Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "X-Amz-Fwd-Header-Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } @@ -8440,12 +9406,12 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentRange != nil && len(*v.ContentRange) > 0 { + if v.ContentRange != nil { locationName := "X-Amz-Fwd-Header-Content-Range" encoder.SetHeader(locationName).String(*v.ContentRange) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "X-Amz-Fwd-Header-Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } @@ -8455,22 +9421,22 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) } - if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { + if v.ErrorCode != nil { locationName := "X-Amz-Fwd-Error-Code" encoder.SetHeader(locationName).String(*v.ErrorCode) } - if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 { + if v.ErrorMessage != 
nil { locationName := "X-Amz-Fwd-Error-Message" encoder.SetHeader(locationName).String(*v.ErrorMessage) } - if v.ETag != nil && len(*v.ETag) > 0 { + if v.ETag != nil { locationName := "X-Amz-Fwd-Header-Etag" encoder.SetHeader(locationName).String(*v.ETag) } - if v.Expiration != nil && len(*v.Expiration) > 0 { + if v.Expiration != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" encoder.SetHeader(locationName).String(*v.Expiration) } @@ -8488,9 +9454,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -8529,17 +9493,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.RequestCharged)) } - if v.RequestRoute != nil && len(*v.RequestRoute) > 0 { + if v.RequestRoute != nil { locationName := "X-Amz-Request-Route" encoder.SetHeader(locationName).String(*v.RequestRoute) } - if v.RequestToken != nil && len(*v.RequestToken) > 0 { + if v.RequestToken != nil { locationName := "X-Amz-Request-Token" encoder.SetHeader(locationName).String(*v.RequestToken) } - if v.Restore != nil && len(*v.Restore) > 0 { + if v.Restore != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Restore" encoder.SetHeader(locationName).String(*v.Restore) } @@ -8549,17 +9513,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -8579,7 +9543,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Integer(*v.TagCount) } - if v.VersionId != nil && len(*v.VersionId) > 0 { + if v.VersionId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" encoder.SetHeader(locationName).String(*v.VersionId) } @@ -8995,6 +9959,17 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi el := value.MemberElement(root) el.String(*v.ChecksumCRC32C) } + if v.ChecksumCRC64NVME != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC64NVME", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC64NVME) + } if v.ChecksumSHA1 != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10587,71 +11562,66 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule return nil } -func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { +func 
awsRestxml_serializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(v.And, el); err != nil { return err } - - case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.ObjectSizeGreaterThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeGreaterThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberObjectSizeLessThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeLessThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.LifecycleRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -10778,6 +11748,24 @@ func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smi return nil } +func awsRestxml_serializeDocumentMetadataTableConfiguration(v *types.MetadataTableConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.S3TablesDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3TablesDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3TablesDestination(v.S3TablesDestination, el); err != nil { + return err + } + } + return nil +} + func awsRestxml_serializeDocumentMetrics(v 
*types.Metrics, value smithyxml.Value) error { defer value.Close() if v.EventThreshold != nil { @@ -11091,6 +12079,17 @@ func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.Notifi func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { defer value.Close() + if v.ETag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ETag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ETag) + } if v.Key != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11102,6 +12101,28 @@ func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, val el := value.MemberElement(root) el.String(*v.Key) } + if v.LastModifiedTime != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LastModifiedTime", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatHTTPDate(*v.LastModifiedTime)) + } + if v.Size != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Size", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(*v.Size) + } if v.VersionId != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11759,49 +12780,44 @@ func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.Replication return nil } -func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error { +func awsRestxml_serializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(v.And, el); err != nil { return err } - - case *types.ReplicationRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.ReplicationRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -12158,6 +13174,33 @@ func 
awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml return nil } +func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error { + defer value.Close() + if v.TableBucketArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableBucketArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableBucketArn) + } + if v.TableName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableName) + } + return nil +} + func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { defer value.Close() if v.End != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index ea3b9c82ac..ac5ab0bef6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -11,6 +11,7 @@ const ( // Values returns all known values for AnalyticsS3ExportFileFormat. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { return []AnalyticsS3ExportFileFormat{ @@ -27,8 +28,9 @@ const ( ) // Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ArchiveStatus) Values() []ArchiveStatus { return []ArchiveStatus{ "ARCHIVE_ACCESS", @@ -45,8 +47,9 @@ const ( ) // Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { return []BucketAccelerateStatus{ "Enabled", @@ -65,8 +68,9 @@ const ( ) // Values returns all known values for BucketCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
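The filter serializer hunks above (awsRestxml_serializeDocumentLifecycleRuleFilter and awsRestxml_serializeDocumentReplicationRuleFilter) reflect a model change in this vendor bump: both filters are serialized as plain structs with optional pointer fields instead of tagged unions, so caller code no longer wraps a value in a ...Member type. A minimal caller-side sketch under the new shape; the prefix value is a placeholder and combining several predicates still goes through the And operator:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Previously a single union member was chosen, e.g.
	//   &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"}.
	// Now the filter is a struct and unset fields stay nil.
	filter := &types.LifecycleRuleFilter{
		Prefix: aws.String("logs/"), // placeholder prefix
	}
	fmt.Println("filter prefix:", *filter.Prefix)
}

ReplicationRuleFilter follows the same pattern with And, Prefix, and Tag fields.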
func (BucketCannedACL) Values() []BucketCannedACL { return []BucketCannedACL{ "private", @@ -90,17 +94,22 @@ const ( BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" + BucketLocationConstraintApSoutheast4 BucketLocationConstraint = "ap-southeast-4" + BucketLocationConstraintApSoutheast5 BucketLocationConstraint = "ap-southeast-5" BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" BucketLocationConstraintEu BucketLocationConstraint = "EU" BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" + BucketLocationConstraintEuCentral2 BucketLocationConstraint = "eu-central-2" BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" + BucketLocationConstraintIlCentral1 BucketLocationConstraint = "il-central-1" + BucketLocationConstraintMeCentral1 BucketLocationConstraint = "me-central-1" BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" @@ -112,6 +121,7 @@ const ( // Values returns all known values for BucketLocationConstraint. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BucketLocationConstraint) Values() []BucketLocationConstraint { return []BucketLocationConstraint{ @@ -125,17 +135,22 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", + "ap-southeast-4", + "ap-southeast-5", "ca-central-1", "cn-north-1", "cn-northwest-1", "EU", "eu-central-1", + "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", + "il-central-1", + "me-central-1", "me-south-1", "sa-east-1", "us-east-2", @@ -156,8 +171,9 @@ const ( ) // Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketLogsPermission) Values() []BucketLogsPermission { return []BucketLogsPermission{ "FULL_CONTROL", @@ -174,8 +190,9 @@ const ( ) // Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
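A pattern repeated across the serializer hunks earlier in this diff is the simplified header guard: the extra len(*v.X) > 0 check is dropped and only nil-ness decides whether a header binding is emitted, so a non-nil pointer to an empty string is no longer silently skipped at this layer. A standalone sketch of the two guards using the standard library's http.Header rather than the SDK's httpbinding encoder, with the header name reused purely for illustration:

package main

import (
	"fmt"
	"net/http"
)

// setHeaderOld mirrors the previous generated guard: skip nil and empty values.
func setHeaderOld(h http.Header, name string, v *string) {
	if v != nil && len(*v) > 0 {
		h.Set(name, *v)
	}
}

// setHeaderNew mirrors the updated guard: only nil values are skipped,
// so an explicitly set empty string is still bound to the header.
func setHeaderNew(h http.Header, name string, v *string) {
	if v != nil {
		h.Set(name, *v)
	}
}

func main() {
	empty := ""
	oldH, newH := http.Header{}, http.Header{}
	setHeaderOld(oldH, "X-Amz-Expected-Bucket-Owner", &empty)
	setHeaderNew(newH, "X-Amz-Expected-Bucket-Owner", &empty)
	fmt.Printf("old guard: present=%v\n", len(oldH.Values("X-Amz-Expected-Bucket-Owner")) > 0)
	fmt.Printf("new guard: present=%v\n", len(newH.Values("X-Amz-Expected-Bucket-Owner")) > 0)
}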
func (BucketType) Values() []BucketType { return []BucketType{ "Directory", @@ -191,8 +208,9 @@ const ( ) // Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketVersioningStatus) Values() []BucketVersioningStatus { return []BucketVersioningStatus{ "Enabled", @@ -204,21 +222,24 @@ type ChecksumAlgorithm string // Enum values for ChecksumAlgorithm const ( - ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" - ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" - ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" - ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" + ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" + ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" + ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME" ) // Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { return []ChecksumAlgorithm{ "CRC32", "CRC32C", "SHA1", "SHA256", + "CRC64NVME", } } @@ -230,14 +251,34 @@ const ( ) // Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumMode) Values() []ChecksumMode { return []ChecksumMode{ "ENABLED", } } +type ChecksumType string + +// Enum values for ChecksumType +const ( + ChecksumTypeComposite ChecksumType = "COMPOSITE" + ChecksumTypeFullObject ChecksumType = "FULL_OBJECT" +) + +// Values returns all known values for ChecksumType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ChecksumType) Values() []ChecksumType { + return []ChecksumType{ + "COMPOSITE", + "FULL_OBJECT", + } +} + type CompressionType string // Enum values for CompressionType @@ -248,8 +289,9 @@ const ( ) // Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
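The enum hunk above adds CRC64NVME to ChecksumAlgorithm and introduces the ChecksumType values COMPOSITE and FULL_OBJECT, matching the new X-Amz-Checksum-Crc64nvme bindings in the PutObject and UploadPart serializers. A minimal usage sketch that asks the SDK to compute and send the CRC64NVME checksum on upload, assuming default config/credential resolution; bucket and key are placeholders:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The checksum middleware computes the CRC64NVME digest of the body and
	// serializes it via the new X-Amz-Checksum-Crc64nvme header binding.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"), // placeholder
		Key:               aws.String("example-key"),    // placeholder
		Body:              strings.NewReader("hello"),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc64nvme,
	})
	if err != nil {
		log.Fatal(err)
	}
}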
func (CompressionType) Values() []CompressionType { return []CompressionType{ "NONE", @@ -263,14 +305,17 @@ type DataRedundancy string // Enum values for DataRedundancy const ( DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" + DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone" ) // Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DataRedundancy) Values() []DataRedundancy { return []DataRedundancy{ "SingleAvailabilityZone", + "SingleLocalZone", } } @@ -284,8 +329,9 @@ const ( // Values returns all known values for DeleteMarkerReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { return []DeleteMarkerReplicationStatus{ "Enabled", @@ -301,8 +347,9 @@ const ( ) // Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EncodingType) Values() []EncodingType { return []EncodingType{ "url", @@ -343,8 +390,9 @@ const ( ) // Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Event) Values() []Event { return []Event{ "s3:ReducedRedundancyLostObject", @@ -387,8 +435,9 @@ const ( // Values returns all known values for ExistingObjectReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { return []ExistingObjectReplicationStatus{ "Enabled", @@ -405,8 +454,9 @@ const ( ) // Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpirationStatus) Values() []ExpirationStatus { return []ExpirationStatus{ "Enabled", @@ -422,8 +472,9 @@ const ( ) // Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpressionType) Values() []ExpressionType { return []ExpressionType{ "SQL", @@ -440,8 +491,9 @@ const ( ) // Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FileHeaderInfo) Values() []FileHeaderInfo { return []FileHeaderInfo{ "USE", @@ -459,8 +511,9 @@ const ( ) // Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FilterRuleName) Values() []FilterRuleName { return []FilterRuleName{ "prefix", @@ -478,8 +531,9 @@ const ( // Values returns all known values for IntelligentTieringAccessTier. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { return []IntelligentTieringAccessTier{ "ARCHIVE_ACCESS", @@ -497,6 +551,7 @@ const ( // Values returns all known values for IntelligentTieringStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { return []IntelligentTieringStatus{ @@ -515,8 +570,9 @@ const ( ) // Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFormat) Values() []InventoryFormat { return []InventoryFormat{ "CSV", @@ -534,8 +590,9 @@ const ( ) // Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFrequency) Values() []InventoryFrequency { return []InventoryFrequency{ "Daily", @@ -553,8 +610,9 @@ const ( // Values returns all known values for InventoryIncludedObjectVersions. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { return []InventoryIncludedObjectVersions{ "All", @@ -584,8 +642,9 @@ const ( ) // Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryOptionalField) Values() []InventoryOptionalField { return []InventoryOptionalField{ "Size", @@ -615,8 +674,9 @@ const ( ) // Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (JSONType) Values() []JSONType { return []JSONType{ "DOCUMENT", @@ -629,14 +689,17 @@ type LocationType string // Enum values for LocationType const ( LocationTypeAvailabilityZone LocationType = "AvailabilityZone" + LocationTypeLocalZone LocationType = "LocalZone" ) // Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (LocationType) Values() []LocationType { return []LocationType{ "AvailabilityZone", + "LocalZone", } } @@ -649,8 +712,9 @@ const ( ) // Values returns all known values for MetadataDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetadataDirective) Values() []MetadataDirective { return []MetadataDirective{ "COPY", @@ -667,8 +731,9 @@ const ( ) // Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetricsStatus) Values() []MetricsStatus { return []MetricsStatus{ "Enabled", @@ -685,8 +750,9 @@ const ( ) // Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADelete) Values() []MFADelete { return []MFADelete{ "Enabled", @@ -703,8 +769,9 @@ const ( ) // Values returns all known values for MFADeleteStatus. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADeleteStatus) Values() []MFADeleteStatus { return []MFADeleteStatus{ "Enabled", @@ -724,8 +791,9 @@ const ( ) // Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectAttributes) Values() []ObjectAttributes { return []ObjectAttributes{ "ETag", @@ -750,8 +818,9 @@ const ( ) // Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectCannedACL) Values() []ObjectCannedACL { return []ObjectCannedACL{ "private", @@ -772,8 +841,9 @@ const ( ) // Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockEnabled) Values() []ObjectLockEnabled { return []ObjectLockEnabled{ "Enabled", @@ -790,6 +860,7 @@ const ( // Values returns all known values for ObjectLockLegalHoldStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { return []ObjectLockLegalHoldStatus{ @@ -807,8 +878,9 @@ const ( ) // Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockMode) Values() []ObjectLockMode { return []ObjectLockMode{ "GOVERNANCE", @@ -825,8 +897,9 @@ const ( ) // Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { return []ObjectLockRetentionMode{ "GOVERNANCE", @@ -844,8 +917,9 @@ const ( ) // Values returns all known values for ObjectOwnership. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectOwnership) Values() []ObjectOwnership { return []ObjectOwnership{ "BucketOwnerPreferred", @@ -872,8 +946,9 @@ const ( ) // Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectStorageClass) Values() []ObjectStorageClass { return []ObjectStorageClass{ "STANDARD", @@ -899,6 +974,7 @@ const ( // Values returns all known values for ObjectVersionStorageClass. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { return []ObjectVersionStorageClass{ @@ -915,6 +991,7 @@ const ( // Values returns all known values for OptionalObjectAttributes. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { return []OptionalObjectAttributes{ @@ -930,8 +1007,9 @@ const ( ) // Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (OwnerOverride) Values() []OwnerOverride { return []OwnerOverride{ "Destination", @@ -947,8 +1025,9 @@ const ( ) // Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (PartitionDateSource) Values() []PartitionDateSource { return []PartitionDateSource{ "EventTime", @@ -965,8 +1044,9 @@ const ( ) // Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Payer) Values() []Payer { return []Payer{ "Requester", @@ -986,8 +1066,9 @@ const ( ) // Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (Permission) Values() []Permission { return []Permission{ "FULL_CONTROL", @@ -1007,8 +1088,9 @@ const ( ) // Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Protocol) Values() []Protocol { return []Protocol{ "http", @@ -1025,8 +1107,9 @@ const ( ) // Values returns all known values for QuoteFields. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (QuoteFields) Values() []QuoteFields { return []QuoteFields{ "ALWAYS", @@ -1044,6 +1127,7 @@ const ( // Values returns all known values for ReplicaModificationsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { return []ReplicaModificationsStatus{ @@ -1061,8 +1145,9 @@ const ( ) // Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { return []ReplicationRuleStatus{ "Enabled", @@ -1082,8 +1167,9 @@ const ( ) // Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationStatus) Values() []ReplicationStatus { return []ReplicationStatus{ "COMPLETE", @@ -1103,8 +1189,9 @@ const ( ) // Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { return []ReplicationTimeStatus{ "Enabled", @@ -1120,8 +1207,9 @@ const ( ) // Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (RequestCharged) Values() []RequestCharged { return []RequestCharged{ "requester", @@ -1136,8 +1224,9 @@ const ( ) // Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RequestPayer) Values() []RequestPayer { return []RequestPayer{ "requester", @@ -1152,8 +1241,9 @@ const ( ) // Values returns all known values for RestoreRequestType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RestoreRequestType) Values() []RestoreRequestType { return []RestoreRequestType{ "SELECT", @@ -1170,8 +1260,9 @@ const ( ) // Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ServerSideEncryption) Values() []ServerSideEncryption { return []ServerSideEncryption{ "AES256", @@ -1189,8 +1280,9 @@ const ( ) // Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SessionMode) Values() []SessionMode { return []SessionMode{ "ReadOnly", @@ -1208,8 +1300,9 @@ const ( // Values returns all known values for SseKmsEncryptedObjectsStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { return []SseKmsEncryptedObjectsStatus{ "Enabled", @@ -1235,8 +1328,9 @@ const ( ) // Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StorageClass) Values() []StorageClass { return []StorageClass{ "STANDARD", @@ -1262,8 +1356,9 @@ const ( // Values returns all known values for StorageClassAnalysisSchemaVersion. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { return []StorageClassAnalysisSchemaVersion{ "V_1", @@ -1279,8 +1374,9 @@ const ( ) // Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TaggingDirective) Values() []TaggingDirective { return []TaggingDirective{ "COPY", @@ -1298,8 +1394,9 @@ const ( ) // Values returns all known values for Tier. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Tier) Values() []Tier { return []Tier{ "Standard", @@ -1308,6 +1405,26 @@ func (Tier) Values() []Tier { } } +type TransitionDefaultMinimumObjectSize string + +// Enum values for TransitionDefaultMinimumObjectSize +const ( + TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class" + TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K" +) + +// Values returns all known values for TransitionDefaultMinimumObjectSize. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize { + return []TransitionDefaultMinimumObjectSize{ + "varies_by_storage_class", + "all_storage_classes_128K", + } +} + type TransitionStorageClass string // Enum values for TransitionStorageClass @@ -1321,8 +1438,9 @@ const ( ) // Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TransitionStorageClass) Values() []TransitionStorageClass { return []TransitionStorageClass{ "GLACIER", @@ -1344,8 +1462,9 @@ const ( ) // Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
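TransitionDefaultMinimumObjectSize is new in this revision; the sketch below is illustrative only, and the returned strings paraphrase the S3 lifecycle documentation rather than quoting SDK output.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeTransitionMinimum paraphrases what each TransitionDefaultMinimumObjectSize
// value implies for lifecycle transitions.
func describeTransitionMinimum(v types.TransitionDefaultMinimumObjectSize) string {
	switch v {
	case types.TransitionDefaultMinimumObjectSizeAllStorageClasses128k:
		return "objects under 128 KB are not transitioned, regardless of storage class"
	case types.TransitionDefaultMinimumObjectSizeVariesByStorageClass:
		return "the minimum object size for transitions depends on the destination storage class"
	default:
		return "unrecognized value: " + string(v)
	}
}

func main() {
	fmt.Println(describeTransitionMinimum(types.TransitionDefaultMinimumObjectSizeAllStorageClasses128k))
}
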
func (Type) Values() []Type { return []Type{ "CanonicalUser", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go index 166484f4ec..1070573a5a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -64,14 +64,46 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. If the object you are -// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 -// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access -// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can -// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. +// The existing object was created with a different encryption type. Subsequent +// +// write requests must include the appropriate encryption parameters in the request +// or while creating the session. +type EncryptionTypeMismatch struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *EncryptionTypeMismatch) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *EncryptionTypeMismatch) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *EncryptionTypeMismatch) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EncryptionTypeMismatch" + } + return *e.ErrorCodeOverride +} +func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 +// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html type InvalidObjectState struct { Message *string @@ -100,6 +132,69 @@ func (e *InvalidObjectState) ErrorCode() string { } func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// You may receive this error in multiple cases. Depending on the reason for the +// error, you may receive one of the messages below: +// +// - Cannot specify both a write offset value and user-defined object metadata +// for existing objects. +// +// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual +// checksum Type: crc32c. +// +// - Request body cannot be empty when 'write offset' is specified. 
+type InvalidRequest struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequest) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequest) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequest) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequest" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The write offset value that you specified does not match the current object +// +// size. +type InvalidWriteOffset struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidWriteOffset) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidWriteOffset) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidWriteOffset) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidWriteOffset" + } + return *e.ErrorCodeOverride +} +func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The specified bucket does not exist. type NoSuchBucket struct { Message *string @@ -256,3 +351,32 @@ func (e *ObjectNotInActiveTierError) ErrorCode() string { return *e.ErrorCodeOverride } func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You have attempted to add more parts than the maximum of 10000 that are +// +// allowed for this object. You can use the CopyObject operation to copy this +// object to another and then add more data to the newly copied object. +type TooManyParts struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyParts) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyParts) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyParts) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyParts" + } + return *e.ErrorCodeOverride +} +func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index 4299b57cc6..46065ab400 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -9,9 +9,9 @@ import ( // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon S3 User Guide. +// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
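The error types added in this file (EncryptionTypeMismatch, InvalidRequest, InvalidWriteOffset, TooManyParts) are plain smithy client errors, so callers typically detect them with errors.As. A hedged sketch follows; in real code err would come from an s3.Client call.

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// classifyS3Error shows the usual errors.As dispatch over the typed errors
// added in this revision.
func classifyS3Error(err error) {
	var tooMany *types.TooManyParts
	var badOffset *types.InvalidWriteOffset
	var encMismatch *types.EncryptionTypeMismatch
	var badRequest *types.InvalidRequest
	switch {
	case errors.As(err, &tooMany):
		log.Println("hit the 10,000-part limit; copy the object and continue with the copy")
	case errors.As(err, &badOffset):
		log.Println("write offset does not match the current object size")
	case errors.As(err, &encMismatch):
		log.Println("encryption parameters do not match the existing object's encryption type")
	case errors.As(err, &badRequest):
		log.Println("invalid request:", err)
	case err != nil:
		log.Println("other error:", err)
	}
}

func main() {
	// Exercise the dispatch with a hand-constructed error value.
	classifyS3Error(&types.TooManyParts{Message: aws.String("exceeded 10000 parts")})
}
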
+// +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete @@ -22,8 +22,9 @@ type AbortIncompleteMultipartUpload struct { } // Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. +// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. +// +// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html type AccelerateConfiguration struct { // Specifies the transfer acceleration status of the bucket. @@ -47,9 +48,10 @@ type AccessControlPolicy struct { // A container for information about access control for replicas. type AccessControlTranslation struct { - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon S3 API Reference. + // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the + // Amazon S3 API Reference. + // + // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html // // This member is required. Owner OwnerOverride @@ -82,7 +84,7 @@ type AnalyticsConfiguration struct { // This member is required. Id *string - // Contains data related to access patterns to be collected and made available to + // Contains data related to access patterns to be collected and made available to // analyze the tradeoffs between different storage classes. // // This member is required. @@ -162,9 +164,10 @@ type AnalyticsS3BucketDestination struct { Format AnalyticsS3ExportFileFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. BucketAccountId *string // The prefix to use when exporting data. The prefix is prepended to all results. @@ -176,6 +179,11 @@ type AnalyticsS3BucketDestination struct { // In terms of implementation, a Bucket is a resource. type Bucket struct { + // BucketRegion indicates the Amazon Web Services region where the bucket is + // located. If the request contains at least one valid parameter, it is included in + // the response. + BucketRegion *string + // Date the bucket was created. This date can change when making changes to your // bucket, such as editing its bucket policy. CreationDate *time.Time @@ -187,12 +195,15 @@ type Bucket struct { } // Specifies the information about the bucket that will be created. For more -// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. 
+// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type BucketInfo struct { - // The number of Availability Zone that's used for redundancy for the bucket. + // The number of Zone (Availability Zone or Local Zone) that's used for redundancy + // for the bucket. DataRedundancy DataRedundancy // The type of bucket. @@ -202,8 +213,9 @@ type BucketInfo struct { } // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html type BucketLifecycleConfiguration struct { // A lifecycle rule for individual objects in an Amazon S3 bucket. @@ -218,8 +230,10 @@ type BucketLifecycleConfiguration struct { type BucketLoggingStatus struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *LoggingEnabled noSmithyDocumentSerde @@ -228,42 +242,65 @@ type BucketLoggingStatus struct { // Contains all the possible checksum or digest values for an object. type Checksum struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // be present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. 
When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is + // present if the object was uploaded with the CRC64NVME checksum algorithm, or if + // the object was uploaded without a checksum (and Amazon S3 added the default + // checksum, CRC64NVME , to the uploaded object). For more information, see [Checking object integrity] in + // the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + noSmithyDocumentSerde } @@ -283,8 +320,10 @@ type CommonPrefix struct { // The container for the completed multipart upload details. type CompletedMultipartUpload struct { - // Array of CompletedPart data types. If you do not supply a valid Part with your - // request, the service sends back an HTTP 400 response. + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back an + // HTTP 400 response. Parts []CompletedPart noSmithyDocumentSerde @@ -293,40 +332,40 @@ type CompletedMultipartUpload struct { // Details of the parts that were uploaded. type CompletedPart struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the multipart upload request was created with the CRC32 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC32C checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -334,12 +373,14 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. 
+ // // - General purpose buckets - In CompleteMultipartUpload , when a additional // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the // PartNumber must start at 1 and the part numbers must be consecutive. // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an // InvalidPartOrder error code. + // // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start // at 1 and the part numbers must be consecutive. PartNumber *int32 @@ -366,10 +407,12 @@ type Condition struct { // be /docs , which identifies all objects in the docs/ folder. Required when the // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints KeyPrefixEquals *string noSmithyDocumentSerde @@ -382,30 +425,49 @@ type ContinuationEvent struct { // Container for all response elements. type CopyObjectResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. 
This checksum is + // present if the object being copied was uploaded with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the object was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. ETag *string @@ -419,40 +481,44 @@ type CopyObjectResult struct { // Container for all response elements. type CopyPartResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32C checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm to the uploaded object). For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 256-bit SHA256 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag of the object. @@ -465,8 +531,9 @@ type CopyPartResult struct { } // Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. 
For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. +// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. +// +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html type CORSConfiguration struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -515,23 +582,37 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the information about the bucket that will be created. This - // functionality is only supported by directory buckets. + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. Bucket *BucketInfo - // Specifies the location where the bucket will be created. For directory buckets, - // the location type is Availability Zone. This functionality is only supported by - // directory buckets. + // Specifies the location where the bucket will be created. + // + // Directory buckets - The location type is Availability Zone or Local Zone. To + // use the Local Zone location type, your account must be enabled for Local Zones. + // Otherwise, you get an HTTP 403 Forbidden error with the error code AccessDenied + // . To learn more, see [Enable accounts for Local Zones]in the Amazon S3 User Guide. + // + // This functionality is only supported by directory buckets. + // + // [Enable accounts for Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html Location *LocationInfo // Specifies the Region where the bucket will be created. You might choose a // Region to optimize latency, minimize costs, or address regulatory requirements. // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. For more information, see - // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is - // created in the US East (N. Virginia) Region (us-east-1) by default. This - // functionality is not supported for directory buckets. + // create buckets in the Europe (Ireland) Region. + // + // If you don't specify a Region, the bucket is created in the US East (N. + // Virginia) Region (us-east-1) by default. Configurations using the value EU will + // create a bucket in eu-west-1 . + // + // For a list of the valid values for all of the Amazon Web Services Regions, see [Regions and Endpoints]. + // + // This functionality is not supported for directory buckets. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -548,7 +629,9 @@ type CSVInput struct { // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . Default: # + // indicate a comment line. The default character is # . + // + // Default: # Comments *string // A single character used to separate individual fields in a record. You can @@ -556,17 +639,26 @@ type CSVInput struct { FieldDelimiter *string // Describes the first line of input. 
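A short sketch of the two bucket-creation paths described in CreateBucketConfiguration: LocationConstraint for a general purpose bucket, and Location plus BucketInfo for a directory bucket. The helper package, function names, and bucket names are hypothetical.

package s3infra // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createGeneralPurposeBucket pins a general purpose bucket to eu-west-1 via
// the LocationConstraint value described above.
func createGeneralPurposeBucket(ctx context.Context, client *s3.Client, name string) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String(name),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintEuWest1,
		},
	})
	return err
}

// createDirectoryBucket creates a directory bucket in a single Availability
// Zone; usw2-az1 is the example zone ID from the doc comment above.
func createDirectoryBucket(ctx context.Context, client *s3.Client, name string) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String(name), // e.g. "example--usw2-az1--x-s3" (placeholder)
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			Location: &types.LocationInfo{
				Name: aws.String("usw2-az1"),
				Type: types.LocationTypeAvailabilityZone,
			},
			Bucket: &types.BucketInfo{
				DataRedundancy: types.DataRedundancySingleAvailabilityZone,
				Type:           types.BucketTypeDirectory,
			},
		},
	})
	return err
}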
Valid values are: + // // - NONE : First line is not a header. + // // - IGNORE : First line is a header, but you can't use the header values to // indicate the column in an expression. You can use column position (such as _1, // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // // - Use : First line is a header, and you can use the header value to identify a // column in an expression ( SELECT "name" FROM OBJECT ). FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV + // quotation marks, as follows: " a , b " . + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string // A single character used for escaping the quotation mark character inside an @@ -599,7 +691,9 @@ type CSVOutput struct { QuoteEscapeCharacter *string // Indicates whether to use quotation marks around output fields. + // // - ALWAYS : Always use quotation marks for output fields. + // // - ASNEEDED : Use quotation marks for output fields when needed. QuoteFields QuoteFields @@ -610,9 +704,11 @@ type CSVOutput struct { noSmithyDocumentSerde } -// The container element for specifying the default Object Lock retention settings -// for new objects placed in the specified bucket. +// The container element for optionally specifying the default Object Lock +// retention settings for new objects placed in the specified bucket. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. type DefaultRetention struct { @@ -635,10 +731,12 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The object to delete. Directory buckets - For directory buckets, an object - // that's composed entirely of whitespace characters is not supported by the - // DeleteObjects API operation. The request will receive a 400 Bad Request error - // and none of the objects in the request will be deleted. + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects in the + // request will be deleted. // // This member is required. Objects []ObjectIdentifier @@ -656,21 +754,26 @@ type DeletedObject struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. 
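To illustrate how the Delete, DeletedObject, and Error shapes above fit together, a minimal batch-delete sketch; the helper package and names are made up.

package s3infra // hypothetical helper package

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteKeys issues one DeleteObjects call and reports per-object results.
func deleteKeys(ctx context.Context, client *s3.Client, bucket string, keys []string) error {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}

	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{Objects: ids},
	})
	if err != nil {
		return err
	}

	for _, d := range out.Deleted {
		if aws.ToBool(d.DeleteMarker) {
			// A simple DELETE on a versioned bucket creates a delete marker.
			fmt.Printf("created delete marker %s for %s\n",
				aws.ToString(d.DeleteMarkerVersionId), aws.ToString(d.Key))
			continue
		}
		fmt.Printf("deleted %s (version %s)\n", aws.ToString(d.Key), aws.ToString(d.VersionId))
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s: %s\n",
			aws.ToString(e.Key), aws.ToString(e.Code), aws.ToString(e.Message))
	}
	return nil
}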
This functionality is - // not supported for directory buckets. + // header is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. This functionality is not supported for - // directory buckets. + // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -689,7 +792,7 @@ type DeleteMarkerEntry struct { // Date and time when the object was last modified. LastModified *time.Time - // The account that created the delete marker.> + // The account that created the delete marker. Owner *Owner // Version ID of an object. @@ -703,17 +806,20 @@ type DeleteMarkerEntry struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example -// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) -// . For more information about delete marker replication, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) -// . If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, see -// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . +// configuration, see [Basic Rule Configuration]. +// +// For more information about delete marker replication, see [Basic Rule Configuration]. +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations type DeleteMarkerReplication struct { - // Indicates whether to replicate delete markers. Indicates whether to replicate - // delete markers. + // Indicates whether to replicate delete markers. + // + // Indicates whether to replicate delete markers. Status DeleteMarkerReplicationStatus noSmithyDocumentSerde @@ -723,7 +829,7 @@ type DeleteMarkerReplication struct { // for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store // the results. // // This member is required. @@ -740,29 +846,32 @@ type Destination struct { // Amazon S3 to change replica ownership to the Amazon Web Services account that // owns the destination bucket by specifying the AccessControlTranslation // property, this is the account ID of the destination bucket owner. For more - // information, see Replication Additional Configuration: Changing the Replica - // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon S3 User Guide. 
+ // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. + // + // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html Account *string // A container that provides information about encryption. If // SourceSelectionCriteria is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration - // A container specifying replication metrics-related settings enabling + // A container specifying replication metrics-related settings enabling // replication metrics and events. Metrics *Metrics - // A container specifying S3 Replication Time Control (S3 RTC), including whether + // A container specifying S3 Replication Time Control (S3 RTC), including whether // S3 RTC is enabled and the time when all objects and operations on objects must // be replicated. Must be specified together with a Metrics block. ReplicationTime *ReplicationTime - // The storage class to use when replicating objects, such as S3 Standard or + // The storage class to use when replicating objects, such as S3 Standard or // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. For valid values, see the StorageClass - // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon S3 API Reference. + // object to create the object replica. + // + // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 + // API Reference. + // + // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html StorageClass StorageClass noSmithyDocumentSerde @@ -784,8 +893,9 @@ type Encryption struct { // If the encryption type is aws:kms , this optional value specifies the ID of the // symmetric encryption customer managed key to use for encryption of job results. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSKeyId *string noSmithyDocumentSerde @@ -793,14 +903,21 @@ type Encryption struct { // Specifies encryption-related information for an Amazon S3 bucket that is a // destination for replicated objects. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. type EncryptionConfiguration struct { // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for // the destination bucket. Amazon S3 uses this key to encrypt replica objects. // Amazon S3 only supports symmetric encryption KMS keys. 
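To make the relationship between Destination, the replica-owner account, and EncryptionConfiguration concrete, a sketch of building the destination block of a replication rule; the ARNs and account ID are invented, and AccessControlTranslation is assumed from the replica-ownership note above.

package s3infra // hypothetical helper package

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// replicationDestination builds the Destination block of a replication rule:
// replicas land in the destination bucket, ownership is translated to the
// destination account, and replicas are re-encrypted with the given KMS key.
func replicationDestination(bucketARN, accountID, kmsKeyARN string) *types.Destination {
	return &types.Destination{
		Bucket:  aws.String(bucketARN), // e.g. "arn:aws:s3:::example-replica-bucket" (placeholder)
		Account: aws.String(accountID),
		AccessControlTranslation: &types.AccessControlTranslation{
			Owner: types.OwnerOverrideDestination,
		},
		EncryptionConfiguration: &types.EncryptionConfiguration{
			// Prefer a fully qualified key ARN, per the note above about alias
			// resolution happening in the requester's account.
			ReplicaKmsKeyID: aws.String(kmsKeyARN),
		},
		StorageClass: types.StorageClassStandard,
	}
}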
For more information, see - // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html ReplicaKmsKeyID *string noSmithyDocumentSerde @@ -819,414 +936,766 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It is // meant to be read and understood by programs that detect and handle errors by // type. The following is a list of Amazon S3 error codes. For more information, - // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) - // . + // see [Error responses]. + // // - Code: AccessDenied + // // - Description: Access Denied + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AccountProblem + // // - Description: There is a problem with your Amazon Web Services account that // prevents the action from completing successfully. Contact Amazon Web Services // Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AllAccessDisabled + // // - Description: All access to this Amazon S3 resource has been disabled. // Contact Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AmbiguousGrantByEmailAddress + // // - Description: The email address you provided is associated with more than // one account. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: AuthorizationHeaderMalformed + // // - Description: The authorization header you provided is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - HTTP Status Code: N/A + // // - Code: BadDigest + // // - Description: The Content-MD5 you specified did not match what we received. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyExists + // // - Description: The requested bucket name is not available. The bucket // namespace is shared by all users of the system. Please select a different name // and try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyOwnedByYou + // // - Description: The bucket you tried to create already exists, and you own it. // Amazon S3 returns this error in all Amazon Web Services Regions except in the // North Virginia Region. For legacy compatibility, if you re-create an existing // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 // OK and resets the bucket access control lists (ACLs). + // // - Code: 409 Conflict (in all Regions except the North Virginia Region) + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketNotEmpty + // // - Description: The bucket you tried to delete is not empty. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: CredentialsNotSupported + // // - Description: This request does not support credentials. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: CrossLocationLoggingProhibited + // // - Description: Cross-location logging not allowed. Buckets in one geographic // location cannot log information to a bucket in another location. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooSmall + // // - Description: Your proposed upload is smaller than the minimum allowed // object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooLarge + // // - Description: Your proposed upload exceeds the maximum allowed object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: ExpiredToken + // // - Description: The provided token has expired. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IllegalVersioningConfigurationException + // // - Description: Indicates that the versioning configuration specified in the // request is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncompleteBody + // // - Description: You did not provide the number of bytes specified by the // Content-Length HTTP header + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncorrectNumberOfFilesInPostRequest + // // - Description: POST requires exactly one file upload per request. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InlineDataTooLarge + // // - Description: Inline data exceeds the maximum allowed size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InternalError + // // - Description: We encountered an internal error. Please try again. + // // - HTTP Status Code: 500 Internal Server Error + // // - SOAP Fault Code Prefix: Server + // // - Code: InvalidAccessKeyId + // // - Description: The Amazon Web Services access key ID you provided does not // exist in our records. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidAddressingHeader + // // - Description: You must specify the Anonymous role. + // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidArgument + // // - Description: Invalid Argument + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketName + // // - Description: The specified bucket is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketState + // // - Description: The request is not valid with the current state of the bucket. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidDigest + // // - Description: The Content-MD5 you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidEncryptionAlgorithmError + // // - Description: The encryption request you specified is not valid. The valid // value is AES256. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidLocationConstraint + // // - Description: The specified location constraint is not valid. 
For more - // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // . + // information about Regions, see [How to Select a Region for Your Buckets]. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidObjectState + // // - Description: The action is not valid for the current state of the object. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPart + // // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified entity tag might not have // matched the part's entity tag. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPartOrder + // // - Description: The list of parts was not in ascending order. Parts list must // be specified in order by part number. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPayer + // // - Description: All access to this object has been disabled. Please contact // Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPolicyDocument + // // - Description: The content of the form does not meet the conditions specified // in the policy document. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRange + // // - Description: The requested range cannot be satisfied. + // // - HTTP Status Code: 416 Requested Range Not Satisfiable + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Please use AWS4-HMAC-SHA256 . + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: SOAP requests must be made over an HTTPS connection. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with non-DNS compliant names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with periods (.) in their names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual // style requests. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest - // - Description: Amazon S3 Transfer Accelerate is not configured on this - // bucket. + // + // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported on this // bucket. Contact Amazon Web Services Support for more information. 
+ // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this // bucket. Contact Amazon Web Services Support for more information. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidSecurity + // // - Description: The provided security credentials are not valid. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidSOAPRequest + // // - Description: The SOAP request body is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidStorageClass + // // - Description: The storage class you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidTargetBucketForLogging + // // - Description: The target bucket for logging does not exist, is not owned by // you, or does not have the appropriate grants for the log-delivery group. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidToken + // // - Description: The provided token is malformed or otherwise invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidURI + // // - Description: Couldn't parse the specified URI. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: KeyTooLongError + // // - Description: Your key is too long. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedACLError + // // - Description: The XML you provided was not well-formed or did not validate // against our published schema. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedPOSTRequest + // // - Description: The body of your POST request is not well-formed // multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedXML + // // - Description: This happens when the user sends malformed XML (XML that // doesn't conform to the published XSD) for the configuration. The error message // is, "The XML you provided was not well-formed or did not validate against our // published schema." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxMessageLengthExceeded + // // - Description: Your request was too big. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxPostPreDataLengthExceededError + // // - Description: Your POST request fields preceding the upload file were too // large. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MetadataTooLarge - // - Description: Your metadata headers exceed the maximum allowed metadata - // size. + // + // - Description: Your metadata headers exceed the maximum allowed metadata size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MethodNotAllowed + // // - Description: The specified method is not allowed against this resource. + // // - HTTP Status Code: 405 Method Not Allowed + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingAttachment + // // - Description: A SOAP attachment was expected, but none were found. 
+ // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingContentLength + // // - Description: You must provide the Content-Length HTTP header. + // // - HTTP Status Code: 411 Length Required + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingRequestBodyError + // // - Description: This happens when the user sends an empty XML document as a // request. The error message is, "Request body is empty." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityElement + // // - Description: The SOAP 1.1 request is missing a security element. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityHeader + // // - Description: Your request is missing a required header. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoLoggingStatusForKey + // // - Description: There is no such thing as a logging status subresource for a // key. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucket + // // - Description: The specified bucket does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucketPolicy + // // - Description: The specified bucket does not have a bucket policy. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchKey + // // - Description: The specified key does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchLifecycleConfiguration + // // - Description: The lifecycle configuration does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchUpload + // // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchVersion + // // - Description: Indicates that the version ID specified in the request does // not match an existing version. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NotImplemented + // // - Description: A header you provided implies functionality that is not // implemented. + // // - HTTP Status Code: 501 Not Implemented + // // - SOAP Fault Code Prefix: Server + // // - Code: NotSignedUp + // // - Description: Your account is not signed up for the Amazon S3 service. You // must sign up before you can use Amazon S3. You can sign up at the following URL: - // Amazon S3 (http://aws.amazon.com/s3) + // [Amazon S3] + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: OperationAborted + // // - Description: A conflicting conditional action is currently in progress // against this resource. Try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: PermanentRedirect + // // - Description: The bucket you are attempting to access must be addressed // using the specified endpoint. Send all future requests to this endpoint. 
+ // // - HTTP Status Code: 301 Moved Permanently + // // - SOAP Fault Code Prefix: Client + // // - Code: PreconditionFailed + // // - Description: At least one of the preconditions you specified did not hold. + // // - HTTP Status Code: 412 Precondition Failed + // // - SOAP Fault Code Prefix: Client + // // - Code: Redirect + // // - Description: Temporary redirect. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: RestoreAlreadyInProgress + // // - Description: Object restore is already in progress. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestIsNotMultiPartContent + // // - Description: Bucket POST must be of the enclosure-type multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeout + // // - Description: Your socket connection to the server was not read from or // written to within the timeout period. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeTooSkewed + // // - Description: The difference between the request time and the server's time // is too large. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTorrentOfBucketError + // // - Description: Requesting the torrent file of a bucket is not permitted. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: SignatureDoesNotMatch + // // - Description: The request signature we calculated does not match the // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. + // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: ServiceUnavailable + // // - Description: Service is unable to handle request. + // // - HTTP Status Code: 503 Service Unavailable + // // - SOAP Fault Code Prefix: Server + // // - Code: SlowDown + // // - Description: Reduce your request rate. + // // - HTTP Status Code: 503 Slow Down + // // - SOAP Fault Code Prefix: Server + // // - Code: TemporaryRedirect + // // - Description: You are being redirected to the bucket while DNS updates. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: TokenRefreshRequired + // // - Description: The provided token must be refreshed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: TooManyBuckets + // // - Description: You have attempted to create more buckets than allowed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnexpectedContent + // // - Description: This request does not support content. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnresolvableGrantByEmailAddress + // // - Description: The email address you provided does not match any account on // record. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UserKeyMustBeSpecified + // // - Description: The bucket POST must contain the specified field name. If it // is specified, check the order of the fields. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // + // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Amazon S3]: http://aws.amazon.com/s3 + // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html Code *string // The error key. @@ -1240,21 +1709,104 @@ type Error struct { // error message. Message *string - // The version ID of the error. This functionality is not supported for directory - // buckets. + // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } +// If the CreateBucketMetadataTableConfiguration request succeeds, but S3 +// +// Metadata was unable to create the table, this structure contains the error code +// and error message. +type ErrorDetails struct { + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code. + // The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. 
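Since the Code values enumerated above are surfaced as plain strings on the Error type, callers usually branch on a handful of them. A small sketch; the retry classification is illustrative, not exhaustive.

package s3infra // hypothetical helper package

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// isRetryableDeleteError reports whether a per-object error from a
// DeleteObjects response is worth retrying. The code strings come from the
// Amazon S3 error-code table documented on the Error type.
func isRetryableDeleteError(e types.Error) bool {
	switch aws.ToString(e.Code) {
	case "InternalError", "SlowDown", "ServiceUnavailable", "OperationAborted", "RequestTimeout":
		return true
	default:
		// AccessDenied, NoSuchBucket, MalformedXML, and similar codes indicate
		// a problem with the request itself and will not succeed on retry.
		return false
	}
}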
+ ErrorCode *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error + // message. The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. + ErrorMessage *string + + noSmithyDocumentSerde +} + // The error information. type ErrorDocument struct { - // The object key name to use when a 4XX class error occurs. Replacement must be - // made for object keys containing special characters (such as carriage returns) - // when using XML requests. For more information, see XML related object key - // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string @@ -1267,9 +1819,12 @@ type EventBridgeConfiguration struct { noSmithyDocumentSerde } -// Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 User Guide. +// Optional configuration to replicate existing source bucket objects. +// +// This parameter is no longer supported. 
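A sketch of how a caller might interpret the new ErrorDetails and GetBucketMetadataTableConfigurationResult shapes; the status strings are the three values listed in the doc comment, and the summary text is illustrative.

package s3infra // hypothetical helper package

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeMetadataTableStatus summarizes a metadata table configuration using
// the CREATING / ACTIVE / FAILED status values documented above.
func describeMetadataTableStatus(r *types.GetBucketMetadataTableConfigurationResult) string {
	if r == nil {
		return "no metadata table configuration"
	}
	status := aws.ToString(r.Status)
	if status == "FAILED" && r.Error != nil {
		return fmt.Sprintf("metadata table failed: %s: %s",
			aws.ToString(r.Error.ErrorCode), aws.ToString(r.Error.ErrorMessage))
	}
	return fmt.Sprintf("metadata table status: %s", status)
}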
To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in +// the Amazon S3 User Guide. +// +// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html type ExistingObjectReplication struct { // Specifies whether Amazon S3 replicates existing source bucket objects. @@ -1293,9 +1848,10 @@ type FilterRule struct { // The object key name prefix or suffix identifying one or more objects to which // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the + // Amazon S3 User Guide. + // + // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html Name FilterRuleName // The value that the filter searches for in object key names. @@ -1304,6 +1860,36 @@ type FilterRule struct { noSmithyDocumentSerde } +// The metadata table configuration for a general purpose bucket. +type GetBucketMetadataTableConfigurationResult struct { + + // The metadata table configuration for a general purpose bucket. + // + // This member is required. + MetadataTableConfigurationResult *MetadataTableConfigurationResult + + // The status of the metadata table. The status values are: + // + // - CREATING - The metadata table is in the process of being created in the + // specified table bucket. + // + // - ACTIVE - The metadata table has been created successfully and records are + // being delivered to the table. + // + // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is + // unable to deliver records. See ErrorDetails for details. + // + // This member is required. + Status *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code + // and error message. + Error *ErrorDetails + + noSmithyDocumentSerde +} + // A collection of parts associated with a multipart upload. type GetObjectAttributesParts struct { @@ -1325,10 +1911,12 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. + // // - General purpose buckets - For GetObjectAttributes , if a additional checksum // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the // request, the response doesn't return Part . + // // - Directory buckets - For GetObjectAttributes , no matter whether a additional // checksum is applied to the object specified in the request, the response returns // Part . @@ -1374,19 +1962,31 @@ type Grantee struct { // Screen name of the grantee. DisplayName *string - // Email address of the grantee. Using email addresses to specify a grantee is - // only supported in the following Amazon Web Services Regions: + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. 
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) - // For a list of all the Amazon S3 supported Regions and endpoints, see Regions - // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the + // Amazon Web Services General Reference. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region EmailAddress *string // The canonical user ID of the grantee. @@ -1402,13 +2002,15 @@ type Grantee struct { type IndexDocument struct { // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request to - // samplebucket/images/ the data that is returned will be for the object with the - // key name images/index.html) The suffix must not be empty and must not include a - // slash character. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // endpoint. (For example, if the suffix is index.html and you make a request to + // samplebucket/images/ , the data that is returned will be for the object with the + // key name images/index.html .) The suffix must not be empty and must not include + // a slash character. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Suffix *string @@ -1419,12 +2021,14 @@ type IndexDocument struct { // Container element that identifies who initiated the multipart upload. type Initiator struct { - // Name of the Principal. This functionality is not supported for directory - // buckets. + // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. If the principal is an IAM User, it provides a user ARN value. + // // Directory buckets - If the principal is an Amazon Web Services account, it // provides the Amazon Web Services account ID. If the principal is an IAM User, it // provides a user ARN value. @@ -1467,10 +2071,11 @@ type IntelligentTieringAndOperator struct { noSmithyDocumentSerde } -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For -// information about the S3 Intelligent-Tiering storage class, see Storage class -// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
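A sketch of applying an S3 Intelligent-Tiering configuration built from the filter and tiering types above; the configuration ID and prefix are placeholders, and the Tiering field shapes are assumed from this vendored SDK version.

package s3infra // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableArchiveTiering applies an S3 Intelligent-Tiering configuration that
// moves objects under logs/ to the Archive Access tier after 90 days.
func enableArchiveTiering(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketIntelligentTieringConfiguration(ctx,
		&s3.PutBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String("archive-logs"),
			IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
				Id:     aws.String("archive-logs"),
				Status: types.IntelligentTieringStatusEnabled,
				Filter: &types.IntelligentTieringFilter{
					Prefix: aws.String("logs/"),
				},
				Tierings: []types.Tiering{
					{
						AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
						Days:       aws.Int32(90),
					},
				},
			},
		})
	return err
}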
+// +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access type IntelligentTieringConfiguration struct { // The ID used to identify the S3 Intelligent-Tiering configuration. @@ -1505,10 +2110,12 @@ type IntelligentTieringFilter struct { And *IntelligentTieringAndOperator // An object key name prefix that identifies the subset of objects to which the - // rule applies. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints Prefix *string // A container of a key value name pair. @@ -1518,8 +2125,9 @@ type IntelligentTieringFilter struct { } // Specifies the inventory configuration for an Amazon S3 bucket. For more -// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon S3 API Reference. +// information, see [GET Bucket inventory]in the Amazon S3 API Reference. +// +// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html type InventoryConfiguration struct { // Contains information about where to publish the inventory results. @@ -1614,9 +2222,10 @@ type InventoryS3BucketDestination struct { Format InventoryFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. AccountId *string // Contains the type of server-side encryption used to encrypt the inventory @@ -1663,8 +2272,9 @@ type JSONOutput struct { type LambdaFunctionConfiguration struct { // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -1676,8 +2286,9 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. 
+ // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -1687,13 +2298,18 @@ type LambdaFunctionConfiguration struct { noSmithyDocumentSerde } -// Container for the expiration for the lifecycle of the object. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// Container for the expiration for the lifecycle of the object. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleExpiration struct { // Indicates at what date the object is to be moved or deleted. The date value // must conform to the ISO 8601 format. The time is always midnight UTC. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. Date *time.Time // Indicates the lifetime, in days, of the objects that are subject to the rule. @@ -1704,14 +2320,19 @@ type LifecycleExpiration struct { // versions. If set to true, the delete marker will be expired; if set to false the // policy takes no action. This cannot be specified with Days or Date in a // Lifecycle Expiration Policy. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpiredObjectDeleteMarker *bool noSmithyDocumentSerde } -// A lifecycle rule for individual objects in an Amazon S3 bucket. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleRule struct { // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is @@ -1722,9 +2343,9 @@ type LifecycleRule struct { // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. + // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
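A sketch of wiring the FilterRule and notification-filter pieces above into a Lambda notification; the function ARN, prefix, and suffix are placeholders, and the event name is written as a literal string rather than an enum constant.

package s3infra // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// notifyOnJPEGUploads invokes a Lambda function for every .jpg object created
// under images/, using non-overlapping prefix and suffix filter rules.
func notifyOnJPEGUploads(ctx context.Context, client *s3.Client, bucket, functionARN string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx,
		&s3.PutBucketNotificationConfigurationInput{
			Bucket: aws.String(bucket),
			NotificationConfiguration: &types.NotificationConfiguration{
				LambdaFunctionConfigurations: []types.LambdaFunctionConfiguration{
					{
						LambdaFunctionArn: aws.String(functionARN),
						Events:            []types.Event{types.Event("s3:ObjectCreated:*")},
						Filter: &types.NotificationConfigurationFilter{
							Key: &types.S3KeyFilter{
								FilterRules: []types.FilterRule{
									{Name: types.FilterRuleNamePrefix, Value: aws.String("images/")},
									{Name: types.FilterRuleNameSuffix, Value: aws.String(".jpg")},
								},
							},
						},
					},
				},
			},
		})
	return err
}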
+ // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload // Specifies the expiration for the lifecycle of the object in the form of date, @@ -1732,9 +2353,12 @@ type LifecycleRule struct { Expiration *LifecycleExpiration // The Filter is used to identify objects that a Lifecycle Rule applies to. A - // Filter must have exactly one of Prefix , Tag , or And specified. Filter is - // required if the LifecycleRule does not contain a Prefix element. - Filter LifecycleRuleFilter + // Filter must have exactly one of Prefix , Tag , ObjectSizeGreaterThan , + // ObjectSizeLessThan , or And specified. Filter is required if the LifecycleRule + // does not contain a Prefix element. + // + // Tag filters are not supported for directory buckets. + Filter *LifecycleRuleFilter // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string @@ -1744,6 +2368,9 @@ type LifecycleRule struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionExpiration *NoncurrentVersionExpiration // Specifies the transition rule for the lifecycle rule that describes when @@ -1751,18 +2378,26 @@ type LifecycleRule struct { // versioning-enabled (or versioning is suspended), you can set this action to // request that Amazon S3 transition noncurrent object versions to a specific // storage class at a set period in the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionTransitions []NoncurrentVersionTransition // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. Replacement must be made for object keys - // containing special characters (such as carriage returns) when using XML - // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string // Specifies when an Amazon S3 object transitions to a specified storage class. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. Transitions []Transition noSmithyDocumentSerde @@ -1793,79 +2428,51 @@ type LifecycleRuleAndOperator struct { // Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , // ObjectSizeLessThan , or And specified. If the Filter element is left empty, the // Lifecycle Rule applies to all objects in the bucket. 
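Because Filter changes in this hunk from the LifecycleRuleFilter union interface to a plain struct pointer, callers built against the older vendored SDK need a small adjustment. A sketch of the before/after, with the prefix and rule ID invented:

package s3infra // hypothetical helper package

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// expireOldLogs installs a lifecycle rule that expires objects under logs/
// after 30 days.
//
// With the previous union-style type, the filter was written as:
//
//	Filter: &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"}
//
// With this update the same intent is expressed with the struct form below.
func expireOldLogs(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx,
		&s3.PutBucketLifecycleConfigurationInput{
			Bucket: aws.String(bucket),
			LifecycleConfiguration: &types.BucketLifecycleConfiguration{
				Rules: []types.LifecycleRule{
					{
						ID:     aws.String("expire-old-logs"),
						Status: types.ExpirationStatusEnabled,
						Filter: &types.LifecycleRuleFilter{
							Prefix: aws.String("logs/"),
						},
						Expiration: &types.LifecycleExpiration{
							Days: aws.Int32(30),
						},
					},
				},
			},
		})
	return err
}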
+type LifecycleRuleFilter struct { + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more + // predicates. The Lifecycle Rule will apply to any object matching all of the + // predicates configured inside the And operator. + And *LifecycleRuleAndOperator + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string + + // This tag must exist in the object's tag set in order for the rule to apply. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + Tag *Tag + + noSmithyDocumentSerde +} + +// Specifies the location where the bucket will be created. // -// The following types satisfy this interface: +// For directory buckets, the location type is Availability Zone or Local Zone. +// For more information about directory buckets, see [Working with directory buckets]in the Amazon S3 User Guide. // -// LifecycleRuleFilterMemberAnd -// LifecycleRuleFilterMemberObjectSizeGreaterThan -// LifecycleRuleFilterMemberObjectSizeLessThan -// LifecycleRuleFilterMemberPrefix -// LifecycleRuleFilterMemberTag -type LifecycleRuleFilter interface { - isLifecycleRuleFilter() -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more -// predicates. The Lifecycle Rule will apply to any object matching all of the -// predicates configured inside the And operator. -type LifecycleRuleFilterMemberAnd struct { - Value LifecycleRuleAndOperator - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} - -// Minimum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { - Value int64 - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} - -// Maximum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeLessThan struct { - Value int64 - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} - -// Prefix identifying one or more objects to which the rule applies. Replacement -// must be made for object keys containing special characters (such as carriage -// returns) when using XML requests. For more information, see XML related object -// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type LifecycleRuleFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} - -// This tag must exist in the object's tag set in order for the rule to apply. -type LifecycleRuleFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} - -// Specifies the location where the bucket will be created. For directory buckets, -// the location type is Availability Zone. 
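// Illustrative sketch (assumed caller code, not part of the vendored SDK): with the
// union-style LifecycleRuleFilterMember* types removed in favor of the struct above,
// callers now set pointer fields on LifecycleRuleFilter directly instead of wrapping
// values in member types. Bucket name and rule values below are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putLifecycle shows the struct-based filter that replaces the old
// LifecycleRuleFilterMemberPrefix / ...MemberTag wrappers.
func putLifecycle(ctx context.Context, client *s3.Client, bucket string) error {
	rule := types.LifecycleRule{
		ID:     aws.String("expire-old-logs"),
		Status: types.ExpirationStatusEnabled,
		Filter: &types.LifecycleRuleFilter{
			Prefix:                aws.String("logs/"),
			ObjectSizeGreaterThan: aws.Int64(1024),
		},
		Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
	}
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{rule},
		},
	})
	return err
}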
For more information about directory -// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. +// This functionality is only supported by directory buckets. +// +// [Working with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type LocationInfo struct { - // The name of the location where the bucket will be created. For directory - // buckets, the name of the location is the AZ ID of the Availability Zone where - // the bucket will be created. An example AZ ID value is usw2-az1 . + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the Zone ID of the + // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An + // example AZ ID value is usw2-az1 . Name *string // The type of location where the bucket will be created. @@ -1875,8 +2482,10 @@ type LocationInfo struct { } // Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon S3 API Reference. +// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API +// Reference. +// +// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. You @@ -1896,10 +2505,12 @@ type LoggingEnabled struct { // This member is required. TargetPrefix *string - // Container for granting information. Buckets that use the bucket owner enforced - // setting for Object Ownership don't support target grants. For more information, - // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) - // in the Amazon S3 User Guide. + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + // + // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general TargetGrants []TargetGrant // Amazon S3 key format for log objects. @@ -1920,16 +2531,49 @@ type MetadataEntry struct { noSmithyDocumentSerde } -// A container specifying replication metrics-related settings enabling +// The metadata table configuration for a general purpose bucket. +type MetadataTableConfiguration struct { + + // The destination information for the metadata table configuration. The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestination *S3TablesDestination + + noSmithyDocumentSerde +} + +// The metadata table configuration for a general purpose bucket. 
The destination +// +// table bucket must be in the same Region and Amazon Web Services account as the +// general purpose bucket. The specified metadata table name must be unique within +// the aws_s3_metadata namespace in the destination table bucket. +type MetadataTableConfigurationResult struct { + + // The destination information for the metadata table configuration. The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestinationResult *S3TablesDestinationResult + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling +// // replication metrics and events. type Metrics struct { - // Specifies whether the replication metrics are enabled. + // Specifies whether the replication metrics are enabled. // // This member is required. Status MetricsStatus - // A container specifying the time threshold for emitting the + // A container specifying the time threshold for emitting the // s3:Replication:OperationMissedThreshold event. EventThreshold *ReplicationTimeValue @@ -1957,8 +2601,9 @@ type MetricsAndOperator struct { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an // existing metrics configuration, note that this is a full replacement of the // existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// . +// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html type MetricsConfiguration struct { // The ID used to identify the metrics configuration. The ID has a 64 character @@ -1978,8 +2623,7 @@ type MetricsConfiguration struct { // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, an // object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// . +// information, see [PutBucketMetricsConfiguration]. // // The following types satisfy this interface: // @@ -1987,6 +2631,8 @@ type MetricsConfiguration struct { // MetricsFilterMemberAnd // MetricsFilterMemberPrefix // MetricsFilterMemberTag +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html type MetricsFilter interface { isMetricsFilter() } @@ -2035,6 +2681,12 @@ type MultipartUpload struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Date and time at which the multipart upload was initiated. 
Initiated *time.Time @@ -2045,13 +2697,16 @@ type MultipartUpload struct { Key *string // Specifies the owner of the object that is part of the multipart upload. - // Directory buckets - The bucket owner is returned as the object owner for all the - // objects. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the objects. Owner *Owner - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass StorageClass // Upload ID that identifies the multipart upload. @@ -2065,20 +2720,31 @@ type MultipartUpload struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. +// +// This parameter applies to general purpose buckets only. It is not supported for +// directory bucket lifecycle configurations. type NoncurrentVersionExpiration struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any + // additional noncurrent versions beyond the specified number to retain. For more + // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see How Amazon S3 - // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 + // User Guide. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 noSmithyDocumentSerde @@ -2093,18 +2759,20 @@ type NoncurrentVersionExpiration struct { // specific period in the object's lifetime. 
type NoncurrentVersionTransition struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been - // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. + // + // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 // The class of storage used to store the object. @@ -2136,8 +2804,9 @@ type NotificationConfiguration struct { } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) -// in the Amazon S3 User Guide. +// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. +// +// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html type NotificationConfigurationFilter struct { // A container for object key name prefix and suffix filtering rules. @@ -2152,21 +2821,31 @@ type Object struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is a hash of the object. The ETag reflects changes only to the // contents of an object, not its metadata. The ETag may or may not be an MD5 // digest of the object data. 
Whether or not it is depends on how the object was // created and how it is encrypted as described below: + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 // or plaintext, have ETags that are an MD5 digest of their object data. + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-C // or SSE-KMS, have ETags that are not an MD5 digest of their object data. + // // - If an object is created by either the Multipart Upload or Part Copy // operation, the ETag is not an MD5 digest, regardless of the method of // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, and // therefore the ETag will not be an MD5 digest. + // // Directory buckets - MD5 is not supported by directory buckets. ETag *string @@ -2177,25 +2856,32 @@ type Object struct { // Creation date of the object. LastModified *time.Time - // The owner of the object Directory buckets - The bucket owner is returned as the - // object owner. + // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Only the S3 Express One Zone storage class is supported by directory - // buckets to store objects. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in + // Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage + // class) in Dedicated Local Zones. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object Size *int64 - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3 + // Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3 + // One Zone-Infrequent Access storage class) in Dedicated Local Zones. StorageClass ObjectStorageClass noSmithyDocumentSerde @@ -2204,16 +2890,39 @@ type Object struct { // Object Identifier is unique value to identify objects. type ObjectIdentifier struct { - // Key name of the object. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // Key name of the object. 
+ // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string - // Version ID for the specific version of the object to delete. This functionality - // is not supported for directory buckets. + // An entity tag (ETag) is an identifier assigned by a web server to a specific + // version of a resource found at a URL. This header field makes the request method + // conditional on ETags . + // + // Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings + // unique to the object. + ETag *string + + // If present, the objects are deleted only if its modification times matches the + // provided Timestamp . + // + // This functionality is only supported for directory buckets. + LastModifiedTime *time.Time + + // If present, the objects are deleted only if its size matches the provided size + // in bytes. + // + // This functionality is only supported for directory buckets. + Size *int64 + + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -2271,38 +2980,41 @@ type ObjectLockRule struct { // A container for elements related to an individual part. type ObjectPart struct { - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the multipart upload request was created with the CRC32 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC32C checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. 
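// Illustrative sketch (assumed caller code, not part of the vendored SDK): the ETag,
// LastModifiedTime, and Size fields added to ObjectIdentifier above make DeleteObjects
// conditional; per the doc comments, the time and size conditions apply to directory
// buckets only. Bucket, key, and ETag values are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteIfUnchanged deletes a key only when its stored ETag still matches.
func deleteIfUnchanged(ctx context.Context, client *s3.Client, bucket, key, etag string) error {
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{{
				Key:  aws.String(key),
				ETag: aws.String(etag), // delete is skipped if the stored ETag differs
			}},
			Quiet: aws.Bool(true),
		},
	})
	return err
}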
When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // The part number identifying the part. This value is a positive integer between @@ -2321,6 +3033,12 @@ type ObjectVersion struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is an MD5 hash of that version of the object. ETag *string @@ -2339,9 +3057,10 @@ type ObjectVersion struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. 
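// Illustrative sketch (assumed caller code, not part of the vendored SDK): the new
// ChecksumCRC64NVME and ChecksumType members surface through APIs that return
// ObjectPart, such as GetObjectAttributes; the attribute enum values used here are
// assumed from the generated ObjectAttributes constants.
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// printPartChecksums lists the per-part checksums recorded for a multipart object.
func printPartChecksums(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesObjectParts,
			types.ObjectAttributesChecksum,
		},
	})
	if err != nil {
		return err
	}
	if out.ObjectParts != nil {
		for _, p := range out.ObjectParts.Parts {
			fmt.Println(aws.ToInt32(p.PartNumber), aws.ToString(p.ChecksumCRC64NVME))
		}
	}
	return nil
}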
For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object. @@ -2382,14 +3101,23 @@ type Owner struct { // Container for the display name of the owner. This value is only supported in // the following Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) + // // This functionality is not supported for directory buckets. DisplayName *string @@ -2414,23 +3142,30 @@ type OwnershipControls struct { type OwnershipControlsRule struct { // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html // // This member is required. 
ObjectOwnership ObjectOwnership @@ -2446,36 +3181,41 @@ type ParquetInput struct { // Container for elements related to a part. type Part struct { - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the object was uploaded with the CRC32 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the object was uploaded with the CRC32C checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the object was uploaded with the SHA1 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the object was uploaded with the SHA256 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -2495,13 +3235,22 @@ type Part struct { } // Amazon S3 keys for log objects are partitioned in the following format: -// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// // PartitionedPrefix defaults to EventTime delivery when server access logs are // delivered. type PartitionedPrefix struct { // Specifies the partition date source for the partitioned prefix. - // PartitionDateSource can be EventTime or DeliveryTime. + // PartitionDateSource can be EventTime or DeliveryTime . + // + // For DeliveryTime , the time in the log file names corresponds to the delivery + // time for the log files. + // + // For EventTime , The logs delivered are for a specific day only. The year, month, + // and day correspond to the day on which the event occurred, and the hour, minutes + // and seconds are set to 00 in the key. PartitionDateSource PartitionDateSource noSmithyDocumentSerde @@ -2543,41 +3292,48 @@ type ProgressEvent struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more -// information about when Amazon S3 considers a bucket or object public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon S3 User Guide. +// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in +// the Amazon S3 User Guide. +// +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should block public access control lists (ACLs) for // this bucket and objects in this bucket. Setting this element to TRUE causes the // following behavior: - // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is - // public. + // + // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. + // // - PUT Object calls fail if the request includes a public ACL. + // // - PUT Bucket calls fail if the request includes a public ACL. + // // Enabling this setting doesn't affect existing policies or ACLs. 
BlockPublicAcls *bool // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT - // Bucket policy if the specified bucket policy allows public access. Enabling this - // setting doesn't affect existing bucket policies. + // Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. BlockPublicPolicy *bool // Specifies whether Amazon S3 should ignore public ACLs for this bucket and // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore - // all public ACLs on this bucket and objects in this bucket. Enabling this setting - // doesn't affect the persistence of any existing ACLs and doesn't prevent new - // public ACLs from being set. + // all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs and + // doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Service principals and authorized users within this account if the - // bucket has a public policy. Enabling this setting doesn't affect previously - // stored bucket policies, except that public and cross-account access within any - // public bucket policy, including non-public delegation to specific accounts, is - // blocked. + // Amazon Web Services service principals and authorized users within this account + // if the bucket has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool noSmithyDocumentSerde @@ -2599,8 +3355,9 @@ type QueueConfiguration struct { QueueArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -2613,7 +3370,14 @@ type QueueConfiguration struct { // The container for the records event. type RecordsEvent struct { - // The byte array of partial, one or more result records. + // The byte array of partial, one or more result records. S3 Select doesn't + // guarantee that a record will be self-contained in one record frame. To ensure + // continuous streaming of data, S3 Select might split the same record across + // multiple record frames instead of aggregating the results in memory. Some S3 + // clients (for example, the SDKforJava) handle this behavior by creating a + // ByteStream out of the response by default. Other clients might not handle this + // behavior by default. In those cases, you must aggregate the results on the + // client side and parse the response. 
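// Illustrative sketch (assumed caller code, not part of the vendored SDK): enabling
// every guard in the PublicAccessBlockConfiguration documented above via
// PutPublicAccessBlock. The bucket name is a placeholder.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// blockAllPublicAccess turns on all four public-access restrictions for a bucket.
func blockAllPublicAccess(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutPublicAccessBlock(ctx, &s3.PutPublicAccessBlockInput{
		Bucket: aws.String(bucket),
		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	return err
}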
Payload []byte noSmithyDocumentSerde @@ -2639,18 +3403,22 @@ type Redirect struct { // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. Replacement must be made for object keys containing special characters - // (such as carriage returns) when using XML requests. For more information, see - // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyPrefixWith *string // The specific object key to use in the redirect request. For example, redirect // request to error.html . Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made - // for object keys containing special characters (such as carriage returns) when - // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyWith *string noSmithyDocumentSerde @@ -2676,9 +3444,11 @@ type RedirectAllRequestsTo struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications -// on replicas. If you don't specify the Filter element, Amazon S3 assumes that -// the replication configuration is the earlier version, V1. In the earlier -// version, this element is not allowed. +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. type ReplicaModifications struct { // Specifies whether Amazon S3 replicates modifications on replicas. @@ -2694,9 +3464,10 @@ type ReplicaModifications struct { type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see How - // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon S3 User Guide. + // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in + // the Amazon S3 User Guide. + // + // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html // // This member is required. 
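// Illustrative sketch (assumed caller code, not part of the vendored SDK): as the
// RecordsEvent comment above notes, a record may be split across frames, so callers
// aggregate payloads before parsing. The query and serialization settings are
// placeholders.
package example

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// selectAll concatenates RecordsEvent frames into one buffer before parsing.
func selectAll(ctx context.Context, client *s3.Client, bucket, key string) ([]byte, error) {
	resp, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		Expression:     aws.String("SELECT * FROM S3Object"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
	})
	if err != nil {
		return nil, err
	}
	stream := resp.GetStream()
	defer stream.Close()

	var buf bytes.Buffer
	for event := range stream.Events() {
		if rec, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			buf.Write(rec.Value.Payload) // frames may split a record; append them all
		}
	}
	return buf.Bytes(), stream.Err()
}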
Role *string @@ -2729,34 +3500,41 @@ type ReplicationRule struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example - // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) - // . For more information about delete marker replication, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) - // . If you are using an earlier version of the replication configuration, Amazon - // S3 handles replication of delete markers differently. For more information, see - // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) - // . + // configuration, see [Basic Rule Configuration]. + // + // For more information about delete marker replication, see [Basic Rule Configuration]. + // + // If you are using an earlier version of the replication configuration, Amazon S3 + // handles replication of delete markers differently. For more information, see [Backward Compatibility]. + // + // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations DeleteMarkerReplication *DeleteMarkerReplication - // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 User Guide. + // Optional configuration to replicate existing source bucket objects. + // + // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in + // the Amazon S3 User Guide. + // + // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html ExistingObjectReplication *ExistingObjectReplication // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. - Filter ReplicationRuleFilter + Filter *ReplicationRuleFilter // A unique identifier for the rule. The maximum value is 255 characters. ID *string // An object key name prefix that identifies the object or objects to which the // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. Replacement must be made for - // object keys containing special characters (such as carriage returns) when using - // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. 
+ // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string @@ -2766,8 +3544,10 @@ type ReplicationRule struct { // according to all replication rules. However, if there are two or more rules with // the same destination bucket, then objects will be replicated according to the // rule with the highest priority. The higher the number, the higher the priority. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon S3 User Guide. + // + // For more information, see [Replication] in the Amazon S3 User Guide. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html Priority *int32 // A container that describes additional filters for identifying the source @@ -2782,9 +3562,13 @@ type ReplicationRule struct { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: +// more than one filter. +// +// For example: +// // - If you specify both a Prefix and a Tag filter, wrap these filters in an And // tag. +// // - If you specify a filter based on multiple tags, wrap the Tag elements in an // And tag. type ReplicationRuleAndOperator struct { @@ -2802,67 +3586,50 @@ type ReplicationRuleAndOperator struct { // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. +type ReplicationRuleFilter struct { + + // A container for specifying rule filters. The filters determine the subset of + // objects to which the rule applies. This element is required only if you specify + // more than one filter. For example: + // + // - If you specify both a Prefix and a Tag filter, wrap these filters in an And + // tag. + // + // - If you specify a filter based on multiple tags, wrap the Tag elements in an + // And tag. + And *ReplicationRuleAndOperator + + // An object key name prefix that identifies the subset of objects to which the + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag + + noSmithyDocumentSerde +} + +// A container specifying S3 Replication Time Control (S3 RTC) related // -// The following types satisfy this interface: -// -// ReplicationRuleFilterMemberAnd -// ReplicationRuleFilterMemberPrefix -// ReplicationRuleFilterMemberTag -type ReplicationRuleFilter interface { - isReplicationRuleFilter() -} - -// A container for specifying rule filters. The filters determine the subset of -// objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: -// - If you specify both a Prefix and a Tag filter, wrap these filters in an And -// tag. -// - If you specify a filter based on multiple tags, wrap the Tag elements in an -// And tag. 
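// Illustrative sketch (assumed caller code, not part of the vendored SDK): mirroring
// the lifecycle change, the ReplicationRuleFilterMember* union removed below gives way
// to the struct above, so a rule's filter is populated through pointer fields. The
// role ARN, bucket names, and rule values are placeholders.
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putReplication configures one replication rule using the struct-based filter.
func putReplication(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String(bucket),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::123456789012:role/replication-role"), // placeholder
			Rules: []types.ReplicationRule{{
				ID:       aws.String("replicate-docs"),
				Status:   types.ReplicationRuleStatusEnabled,
				Priority: aws.Int32(1),
				Filter:   &types.ReplicationRuleFilter{Prefix: aws.String("docs/")},
				DeleteMarkerReplication: &types.DeleteMarkerReplication{
					Status: types.DeleteMarkerReplicationStatusDisabled,
				},
				Destination: &types.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
				},
			}},
		},
	})
	return err
}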
-type ReplicationRuleFilterMemberAnd struct { - Value ReplicationRuleAndOperator - - noSmithyDocumentSerde -} - -func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} - -// An object key name prefix that identifies the subset of objects to which the -// rule applies. Replacement must be made for object keys containing special -// characters (such as carriage returns) when using XML requests. For more -// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type ReplicationRuleFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} - -func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} - -// A container for specifying a tag key and value. The rule applies only to -// objects that have the tag in their tag set. -type ReplicationRuleFilterMemberTag struct { - Value Tag - - noSmithyDocumentSerde -} - -func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} - -// A container specifying S3 Replication Time Control (S3 RTC) related // information, including whether S3 RTC is enabled and the time when all objects // and operations on objects must be replicated. Must be specified together with a // Metrics block. type ReplicationTime struct { - // Specifies whether the replication time is enabled. + // Specifies whether the replication time is enabled. // // This member is required. Status ReplicationTimeStatus - // A container specifying the time by which replication should be complete for all - // objects and operations on objects. + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. // // This member is required. Time *ReplicationTimeValue @@ -2870,11 +3637,14 @@ type ReplicationTime struct { noSmithyDocumentSerde } -// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// // and replication metrics EventThreshold . type ReplicationTimeValue struct { - // Contains an integer specifying time in minutes. Valid value: 15 + // Contains an integer specifying time in minutes. + // + // Valid value: 15 Minutes *int32 noSmithyDocumentSerde @@ -2905,8 +3675,10 @@ type RequestProgress struct { type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . The Days element is required for regular restores, and must not - // be provided for select requests. + // OutputLocation . + // + // The Days element is required for regular restores, and must not be provided for + // select requests. Days *int32 // The optional description for the job. @@ -2919,13 +3691,23 @@ type RestoreRequest struct { // Describes the location where the restore job's output is stored. OutputLocation *OutputLocation + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // Describes the parameters for Select job types. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ SelectParameters *SelectParameters // Retrieval tier at which the restore will be processed. Tier Tier + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. 
[Learn more] + // // Type of restore request. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ Type RestoreRequestType noSmithyDocumentSerde @@ -2933,34 +3715,45 @@ type RestoreRequest struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see Working -// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. Only the S3 Express One Zone storage class is supported by directory -// buckets to store objects. +// about these storage classes and how to work with archived objects, see [Working with archived objects]in the +// Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// only support EXPRESS_ONEZONE (the S3 Express One Zone storage class) in +// Availability Zones and ONEZONE_IA (the S3 One Zone-Infrequent Access storage +// class) in Dedicated Local Zones. +// +// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html type RestoreStatus struct { // Specifies whether the object is currently being restored. If the object // restoration is in progress, the header returns the value TRUE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object - // restoration has completed, the header returns the value FALSE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, - // there is no header response. + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE . + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. IsRestoreInProgress *bool // Indicates when the restored copy will expire. This value is populated only if // the object has already been restored. For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" RestoreExpiryDate *time.Time noSmithyDocumentSerde } // Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon S3 User Guide. +// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. +// +// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects type RoutingRule struct { // Container for redirect information. You can redirect requests to another host, @@ -3024,6 +3817,69 @@ type S3Location struct { noSmithyDocumentSerde } +// The destination information for the metadata table configuration. 
The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestination struct { + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. + TableName *string + + noSmithyDocumentSerde +} + +// The destination information for the metadata table configuration. The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestinationResult struct { + + // The Amazon Resource Name (ARN) for the metadata table in the metadata table + // configuration. The specified metadata table name must be unique within the + // aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + TableArn *string + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. + TableName *string + + // The table bucket namespace for the metadata table in your metadata table + // configuration. This value is always aws_s3_metadata . + // + // This member is required. + TableNamespace *string + + noSmithyDocumentSerde +} + // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section @@ -3103,11 +3959,26 @@ type SelectObjectContentEventStreamMemberStats struct { func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} +// Amazon S3 Select is no longer available to new customers. Existing customers of +// Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Describes the parameters for Select job types. +// +// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. 
+// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html +// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html type SelectParameters struct { + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // The expression that is used to query the object. // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + // // This member is required. Expression *string @@ -3131,34 +4002,73 @@ type SelectParameters struct { // Describes the default server-side encryption to apply to new objects in the // bucket. If a PUT Object request doesn't specify any server-side encryption, this -// default encryption will be applied. If you don't specify a customer managed key -// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key -// in your Amazon Web Services account the first time that you add an object -// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for -// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon S3 API Reference. +// default encryption will be applied. For more information, see [PutBucketEncryption]. +// +// - General purpose buckets - If you don't specify a customer managed key at +// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( +// aws/s3 ) in your Amazon Web Services account the first time that you add an +// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS +// key for SSE-KMS. +// +// - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per +// directory bucket's lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. +// +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk type ServerSideEncryptionByDefault struct { // Server-side encryption algorithm to use for the default encryption. // + // For directory buckets, there are only two supported values for server-side + // encryption: AES256 and aws:kms . + // // This member is required. SSEAlgorithm ServerSideEncryption - // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services - // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key - // ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. + // Amazon Web Services Key Management Service (KMS) customer managed key ID to use + // for the default encryption. 
+ // + // - General purpose buckets - This parameter is allowed if and only if + // SSEAlgorithm is set to aws:kms or aws:kms:dsse . + // + // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is + // set to aws:kms . + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the + // KMS key. + // // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key Alias: alias/alias-name - // If you use a key ID, you can run into a LogDestination undeliverable error when - // creating a VPC flow log. If you are using encryption with cross-account or - // Amazon Web Services service operations you must use a fully qualified KMS key - // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) - // . Amazon S3 only supports symmetric encryption KMS keys. For more information, - // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations, you must use a fully qualified KMS key ARN. For more information, + // see [Using encryption for cross-account operations]. + // + // - General purpose buckets - If you're specifying a customer managed KMS key, + // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias + // instead, then KMS resolves the key within the requester’s account. This behavior + // can result in data that's encrypted with a KMS key that belongs to the + // requester, and not the bucket owner. Also, if you use a key ID, you can run into + // a LogDestination undeliverable error when creating a VPC flow log. + // + // - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory + // bucket, only use the key ID or key ARN. The key alias format of the KMS key + // isn't supported. + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSMasterKeyID *string noSmithyDocumentSerde @@ -3177,6 +4087,18 @@ type ServerSideEncryptionConfiguration struct { } // Specifies the default server-side encryption configuration. +// +// - General purpose buckets - If you're specifying a customer managed KMS key, +// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias +// instead, then KMS resolves the key within the requester’s account. This behavior +// can result in data that's encrypted with a KMS key that belongs to the +// requester, and not the bucket owner. +// +// - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory +// bucket, only use the key ID or key ARN. 
The key alias format of the KMS key +// isn't supported. +// +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk type ServerSideEncryptionRule struct { // Specifies the default server-side encryption to apply to new objects in the @@ -3187,17 +4109,33 @@ type ServerSideEncryptionRule struct { // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more - // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. + // to use an S3 Bucket Key. + // + // - General purpose buckets - By default, S3 Bucket Key is not enabled. For + // more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. + // + // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't + // supported, when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool noSmithyDocumentSerde } -// The established temporary security credentials of the session. Directory -// buckets - These session credentials are only supported for the authentication -// and authorization of Zonal endpoint APIs on directory buckets. +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint API operations on directory +// buckets. type SessionCredentials struct { // A unique identifier that's associated with a secret access key. The access key @@ -3233,7 +4171,9 @@ type SessionCredentials struct { } // To use simple format for S3 keys for log objects, set SimplePrefix to an empty -// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] type SimplePrefix struct { noSmithyDocumentSerde } @@ -3249,12 +4189,14 @@ type SourceSelectionCriteria struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications - // on replicas. If you don't specify the Filter element, Amazon S3 assumes that - // the replication configuration is the earlier version, V1. 
In the earlier - // version, this element is not allowed + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed ReplicaModifications *ReplicaModifications - // A container for filter information for the selection of Amazon S3 objects + // A container for filter information for the selection of Amazon S3 objects // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria // in the replication configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects @@ -3372,10 +4314,12 @@ type Tagging struct { noSmithyDocumentSerde } -// Container for granting information. Buckets that use the bucket owner enforced -// setting for Object Ownership don't support target grants. For more information, -// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. +// +// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general type TargetGrant struct { // Container for the person being granted permissions. @@ -3406,9 +4350,10 @@ type TargetObjectKeyFormat struct { // without additional operational overhead. type Tiering struct { - // S3 Intelligent-Tiering access tier. See Storage class for automatically - // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) - // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 + // Intelligent-Tiering storage class. + // + // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access // // This member is required. AccessTier IntelligentTieringAccessTier @@ -3431,8 +4376,9 @@ type Tiering struct { type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -3444,8 +4390,9 @@ type TopicConfiguration struct { TopicArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. 
+ // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3456,9 +4403,10 @@ type TopicConfiguration struct { } // Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon S3 User Guide. +// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 +// User Guide. +// +// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html type Transition struct { // Indicates when objects are transitioned to the specified storage class. The @@ -3466,7 +4414,15 @@ type Transition struct { Date *time.Time // Indicates the number of days after creation when objects are transitioned to - // the specified storage class. The value must be a positive integer. + // the specified storage class. If the specified storage class is + // INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE , valid values are + // 0 or positive integers. If the specified storage class is STANDARD_IA or + // ONEZONE_IA , valid values are positive integers greater than 30 . Be aware that + // some storage classes have a minimum storage duration and that you're charged for + // transitioning objects before their minimum storage duration. For more + // information, see [Constraints and considerations for transitions]in the Amazon S3 User Guide. + // + // [Constraints and considerations for transitions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html#lifecycle-configuration-constraints Days *int32 // The storage class to which you want the object to transition. @@ -3476,8 +4432,9 @@ type Transition struct { } // Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon S3 API Reference. +// see [PUT Bucket versioning]in the Amazon S3 API Reference. +// +// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html type VersioningConfiguration struct { // Specifies whether MFA delete is enabled in the bucket versioning configuration. @@ -3500,8 +4457,9 @@ type WebsiteConfiguration struct { // The name of the index document for the website. IndexDocument *IndexDocument - // The redirect behavior for every request to this bucket's website endpoint. If - // you specify this property, you can't specify any other property. + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo // Rules that define when a redirect is applied and the redirect behavior. 
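The reworked doc comments in the hunks above spell out how ServerSideEncryptionByDefault and ServerSideEncryptionRule now distinguish general purpose buckets from directory buckets. As a rough orientation for reviewers, a minimal sketch of applying an SSE-KMS default encryption rule through the v2 client follows; the bucket name and KMS key ARN are placeholders, error handling is elided, and this is only an illustration of the vendored types, not code added by this patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Default all new objects in the bucket to SSE-KMS and enable an S3 Bucket Key.
	_, err = client.PutBucketEncryption(context.TODO(), &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket name
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm: types.ServerSideEncryptionAwsKms,
					// placeholder key ARN; a fully qualified ARN is recommended per the doc comments above
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
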
@@ -3522,7 +4480,5 @@ type UnknownUnionMember struct { } func (*UnknownUnionMember) isAnalyticsFilter() {} -func (*UnknownUnionMember) isLifecycleRuleFilter() {} func (*UnknownUnionMember) isMetricsFilter() {} -func (*UnknownUnionMember) isReplicationRuleFilter() {} func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go new file mode 100644 index 0000000000..0e664c59ce --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go @@ -0,0 +1,23 @@ +package s3 + +// This contains helper methods to set resolver URI into the context object. If they are ever used for +// something other than S3, they should be moved to internal/context/context.go + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type s3resolvedURI struct{} + +// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2 +func setS3ResolvedURI(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, s3resolvedURI{}, value) +} + +// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2 +func getS3ResolvedURI(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string) + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go index e954b302df..97a56bd313 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -90,6 +90,26 @@ func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middle return next.HandleInitialize(ctx, in) } +type validateOpCreateBucketMetadataTableConfiguration struct { +} + +func (*validateOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateMultipartUpload struct { } @@ -270,6 +290,26 @@ func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type 
validateOpDeleteBucketMetricsConfiguration struct { } @@ -670,6 +710,26 @@ func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpGetBucketMetadataTableConfiguration struct { +} + +func (*validateOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetBucketMetricsConfiguration struct { } @@ -1886,6 +1946,10 @@ func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) } +func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) } @@ -1922,6 +1986,10 @@ func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) } +func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) } @@ -2002,6 +2070,10 @@ func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) } +func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) } @@ -2912,22 +2984,20 @@ func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { } } -func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error { +func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.LifecycleRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", 
err.(smithy.InvalidParamsError)) + } + if v.And != nil { + if err := validateLifecycleRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -2976,6 +3046,25 @@ func validateLoggingEnabled(v *types.LoggingEnabled) error { } } +func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"} + if v.S3TablesDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination")) + } else if v.S3TablesDestination != nil { + if err := validateS3TablesDestination(v.S3TablesDestination); err != nil { + invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateMetrics(v *types.Metrics) error { if v == nil { return nil @@ -3320,22 +3409,20 @@ func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) err } } -func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error { +func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - if err := validateReplicationRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.ReplicationRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + if v.And != nil { + if err := validateReplicationRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -3486,6 +3573,24 @@ func validateS3Location(v *types.S3Location) error { } } +func validateS3TablesDestination(v *types.S3TablesDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"} + if v.TableBucketArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSelectParameters(v *types.SelectParameters) error { if v == nil { return nil @@ -3937,6 +4042,28 @@ func validateOpCreateBucketInput(v *CreateBucketInput) error { } } +func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.MetadataTableConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration")) + } else if v.MetadataTableConfiguration != nil { + if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil { + invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { if v == nil { return nil @@ -4084,6 +4211,21 @@ func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { } } +func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput) error { if v == nil { return nil @@ -4409,6 +4551,21 @@ func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { } } +func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 4a3e25ac1f..3be25b8bec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,188 @@ +# v1.25.3 (2025-04-03) + +* No change notes available for this release. + +# v1.25.2 (2025-03-25) + +* No change notes available for this release. + +# v1.25.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.25.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.16 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.15 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.14 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.13 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.12 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.24.11 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.24.10 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.9 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.8 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.7 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-11-07) + +* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses + +# v1.24.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.4 (2024-10-03) + +* No change notes available for this release. + +# v1.23.3 (2024-09-27) + +* No change notes available for this release. + +# v1.23.2 (2024-09-25) + +* No change notes available for this release. + +# v1.23.1 (2024-09-23) + +* No change notes available for this release. + +# v1.23.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.22.7 (2024-09-04) + +* No change notes available for this release. + +# v1.22.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.4 (2024-07-18) + +* No change notes available for this release. + +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-05-23) + +* No change notes available for this release. + +# v1.20.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.20.5 (2024-04-05) * No change notes available for this release. 
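The changelog entries above (tracing and metrics support, HTTP client metrics, user-agent feature tracking) correspond to the MeterProvider and TracerProvider plumbing added to the client in the api_client.go diff that follows. A minimal sketch, assuming the exported MeterProvider and TracerProvider fields on sso.Options introduced by this vendor bump, of how a consumer could set them explicitly; the no-op providers shown are the same defaults the client resolves when nothing is supplied, and an OpenTelemetry-backed provider could be substituted to capture the new per-operation spans and duration histograms.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
	"github.com/aws/smithy-go/metrics"
	"github.com/aws/smithy-go/tracing"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// These assignments mirror the defaults applied by resolveMeterProvider and
	// resolveTracerProvider in the vendored client; swap in real providers to
	// collect client.call.* metrics and request spans.
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.MeterProvider = metrics.NopMeterProvider{}
		o.TracerProvider = &tracing.NopTracerProvider{}
	})
	_ = client
}
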
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index fff457735b..9f10e65ad7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -4,6 +4,7 @@ package sso import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -14,22 +15,157 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "SSO" const ServiceAPIVersion = "2019-06-10" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso") +} + // Client provides the API client to make operations call for AWS Single Sign-On. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
Provide @@ -50,6 +186,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -68,6 +208,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -80,8 +222,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -105,15 +254,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -151,7 +341,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != 
nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -425,6 +615,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -441,11 +655,36 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -484,6 +723,87 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -535,3 +855,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + 
return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 4b21e8b00a..b8031eeea3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -30,9 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -101,6 +102,9 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -113,6 +117,15 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { return err } @@ -134,6 +147,18 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index e44da697c5..4294e4d3c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -29,9 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { - // The token issued by the CreateToken API call. 
For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -106,6 +107,9 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -118,6 +122,15 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { return err } @@ -139,17 +152,21 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. -type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - // ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles type ListAccountRolesPaginatorOptions struct { // The number of items that clients can request per page. @@ -213,6 +230,9 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -232,6 +252,14 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func return result, nil } +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. 
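// Editorial note, not part of the vendored patch: this hunk moves the
// ListAccountRolesAPIClient interface below the paginator and makes NextPage
// prepend addIsPaginatorUserAgent to the per-call option functions, so paginated
// calls are flagged in the User-Agent header. A minimal usage sketch of the
// paginator follows; the client, accessToken, and accountID values are assumed
// to exist in the caller's code.
//
//	p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
//		AccessToken: &accessToken,
//		AccountId:   &accountID,
//	})
//	for p.HasMorePages() {
//		page, err := p.NextPage(ctx)
//		if err != nil {
//			return err
//		}
//		for _, role := range page.RoleList {
//			fmt.Println(*role.RoleName, *role.AccountId)
//		}
//	}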
+type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 2d7add067f..1db72a995e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,9 +12,10 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. +// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity +// Center User Guide. This operation returns a paginated response. +// +// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { if params == nil { params = &ListAccountsInput{} @@ -32,9 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. 
AccessToken *string @@ -104,6 +106,9 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -116,6 +121,15 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAccountsValidationMiddleware(stack); err != nil { return err } @@ -137,16 +151,21 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - // ListAccountsPaginatorOptions is the paginator options for ListAccounts type ListAccountsPaginatorOptions struct { // This is the number of items clients can request per page. @@ -210,6 +229,9 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -229,6 +251,13 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op return result, nil } +// ListAccountsAPIClient is a client that implements the ListAccounts operation. +type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 3ee682d19e..2ca66ca509 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -12,16 +12,20 @@ import ( // Removes the locally stored SSO tokens from the client-side cache and sends an // API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. If a user uses IAM Identity -// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is -// used to obtain an IAM session, as specified in the corresponding IAM Identity -// Center permission set. 
More specifically, IAM Identity Center assumes an IAM -// role in the target account on behalf of the user, and the corresponding -// temporary AWS credentials are returned to the client. After user logout, any -// existing IAM role sessions that were created by using IAM Identity Center -// permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, IAM +// Identity Center assumes an IAM role in the target account on behalf of the user, +// and the corresponding temporary AWS credentials are returned to the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured in +// the permission set. For more information, see [User authentications]in the IAM Identity Center User +// Guide. +// +// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -39,9 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. 
AccessToken *string @@ -96,6 +101,9 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -108,6 +116,15 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpLogoutValidationMiddleware(stack); err != nil { return err } @@ -129,6 +146,18 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index 3b28e825dd..366963b49f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -8,11 +8,13 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +92,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -169,7 +171,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -181,6 +186,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + 
span.End() return next.HandleFinalize(ctx, in) } @@ -240,7 +248,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -250,12 +261,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -271,6 +290,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -280,6 +300,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -300,9 +323,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 8bba205f43..ec23c36f5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -13,12 +13,23 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpGetRoleCredentials struct { } @@ -34,6 +45,10 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con return out, 
metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -73,6 +88,7 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -190,6 +206,10 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -229,6 +249,7 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -355,6 +376,10 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -394,6 +419,7 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C } } + span.End() return out, metadata, err } @@ -520,6 +546,10 @@ func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -537,6 +567,7 @@ func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context } } + span.End() return out, metadata, err } @@ -858,7 +889,7 @@ func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRe for key, value := range shape { switch key { - case "message": + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { @@ -898,7 +929,7 @@ func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.Resourc for key, value := range shape { switch key { - case "message": + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { @@ -1092,7 +1123,7 @@ func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyR for key, value := range shape { switch key { - case "message": + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { @@ -1132,7 +1163,7 @@ func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.Unauthorize for key, value := range shape { switch key { - case "message": + case "message", "Message": if value != nil { jtv, ok := value.(string) if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index 59456d5dc2..7f6e429fda 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -6,16 +6,22 @@ // AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web // service that makes it easy for you to assign user access to IAM Identity Center // resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. Although AWS -// Single Sign-On was renamed, the sso and identitystore API namespaces will -// continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . This reference guide describes the IAM Identity Center Portal operations that +// and roles assigned to them and get federated into the application. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API +// namespaces will continue to retain their original name for backward +// compatibility purposes. For more information, see [IAM Identity Center rename]. +// +// This reference guide describes the IAM Identity Center Portal operations that // you can call programatically and includes detailed information on data types and -// errors. AWS provides SDKs that consist of libraries and sample code for various +// errors. +// +// AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// . +// including how to download and install them, see [Tools for Amazon Web Services]. +// +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 76521eec0e..53c6bc7561 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -288,6 +289,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
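// Editorial note, not part of the vendored patch: EndpointResolverV2 resolves
// endpoints from EndpointParameters rather than from the legacy (region, options)
// pair. A minimal, hypothetical sketch of plugging in a static resolver is shown
// below; the URL and client construction are assumptions, and in most cases
// setting Options.BaseEndpoint is the simpler migration path.
//
//	type staticResolver struct{ uri url.URL }
//
//	func (r staticResolver) ResolveEndpoint(ctx context.Context, params sso.EndpointParameters) (smithyendpoints.Endpoint, error) {
//		// Always return the fixed endpoint; params (Region, UseFIPS, ...) are ignored.
//		return smithyendpoints.Endpoint{URI: r.uri}, nil
//	}
//
//	// u, _ := url.Parse("https://portal.sso.us-east-1.amazonaws.com")
//	// client := sso.NewFromConfig(cfg, func(o *sso.Options) { o.EndpointResolverV2 = staticResolver{uri: *u} })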
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +477,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -491,6 +503,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -504,12 +519,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -531,5 +551,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index 936253d7ca..1a88fe4df8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -25,11 +25,12 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/sso", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 44379817e8..59aa2aebdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.5" +const goModuleVersion = "1.25.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index 2c3a77ce30..04416606be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -87,6 +87,7 @@ func New() *Resolver { var partitionRegexp = struct { Aws *regexp.Regexp AwsCn 
*regexp.Regexp + AwsEusc *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp AwsIsoE *regexp.Regexp @@ -94,8 +95,9 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), @@ -227,6 +229,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-4", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -235,6 +245,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -418,6 +436,27 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, { ID: "aws-iso", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 5dee7e53f4..aa744f1594 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,8 +52,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -64,23 +68,29 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. 
+ MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -95,10 +105,14 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +157,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
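// Editorial note, not part of the vendored patch: this SDK update adds
// MeterProvider and TracerProvider fields to Options (both default to the no-op
// implementations resolved by resolveMeterProvider/resolveTracerProvider). A
// minimal, hypothetical sketch of setting them at client construction time; a
// real application would pass an OpenTelemetry-backed provider instead of the
// explicit no-ops used here.
//
//	cfg, err := config.LoadDefaultConfig(ctx)
//	if err != nil {
//		return err
//	}
//	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
//		o.MeterProvider = metrics.NopMeterProvider{}    // from github.com/aws/smithy-go/metrics
//		o.TracerProvider = &tracing.NopTracerProvider{} // from github.com/aws/smithy-go/tracing
//	})
//	_ = client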
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go index 02e3141156..a7a5b57de0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -8,6 +8,7 @@ import ( smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/encoding/httpbinding" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -21,6 +22,10 @@ func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -57,6 +62,8 @@ func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { @@ -64,7 +71,7 @@ func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCrede return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -90,6 +97,10 @@ func (*awsRestjson1_serializeOpListAccountRoles) ID() string { func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -126,6 +137,8 @@ func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { @@ -133,7 +146,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRol return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -163,6 +176,10 @@ func (*awsRestjson1_serializeOpListAccounts) ID() string { func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := 
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -199,6 +216,8 @@ func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { @@ -206,7 +225,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } @@ -232,6 +251,10 @@ func (*awsRestjson1_serializeOpLogout) ID() string { func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -268,6 +291,8 @@ func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { @@ -275,7 +300,7 @@ func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *ht return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AccessToken != nil && len(*v.AccessToken) > 0 { + if v.AccessToken != nil { locationName := "X-Amz-Sso_bearer_token" encoder.SetHeader(locationName).String(*v.AccessToken) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 8dc02296b1..07ac468e31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -25,22 +25,24 @@ type AccountInfo struct { type RoleCredentials struct { // The identifier used for the temporary security credentials. For more - // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html AccessKeyId *string // The date on which temporary security credentials expire. Expiration int64 - // The key that is used to sign the request. 
For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SecretAccessKey *string - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SessionToken *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 053f180bf6..b4cdac6b38 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,193 @@ +# v1.30.1 (2025-04-03) + +* No change notes available for this release. + +# v1.30.0 (2025-03-27) + +* **Feature**: This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response. + +# v1.29.2 (2025-03-24) + +* No change notes available for this release. + +# v1.29.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.29.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.15 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.14 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2025-01-24) + +* **Documentation**: Fixed typos in the descriptions. +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.28.10 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.28.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2024-10-03) + +* No change notes available for this release. + +# v1.27.3 (2024-09-27) + +* No change notes available for this release. + +# v1.27.2 (2024-09-25) + +* No change notes available for this release. + +# v1.27.1 (2024-09-23) + +* No change notes available for this release. + +# v1.27.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.26.7 (2024-09-04) + +* No change notes available for this release. + +# v1.26.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.4 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2024-07-03) + +* No change notes available for this release. + +# v1.26.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.25.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-05-23) + +* No change notes available for this release. + +# v1.24.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-05-10) + +* **Feature**: Updated request parameters for PKCE support. 
+ +# v1.23.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.23.4 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 8dc643bb0c..57440b1fa8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -4,6 +4,7 @@ package ssooidc import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -14,22 +15,157 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "SSO OIDC" const ServiceAPIVersion = "2019-06-10" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) 
+ } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc") +} + // Client provides the API client to make operations call for AWS SSO OIDC. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
Provide @@ -50,6 +186,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -68,6 +208,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -80,8 +222,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -105,15 +254,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -151,7 +341,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); 
err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -425,6 +615,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -441,11 +655,36 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -484,6 +723,87 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -535,3 +855,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + 
return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 63f1eeb131..493878338e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -12,7 +12,7 @@ import ( // Creates and returns access and refresh tokens for clients that are // authenticated using client secrets. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application +// short-lived credentials for the assigned AWS accounts or to access application // APIs using bearer authentication. func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { if params == nil { @@ -32,34 +32,42 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF type CreateTokenInput struct { // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClient API. + // from the result of the RegisterClientAPI. // // This member is required. ClientId *string // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClient API. + // persisted result of the RegisterClientAPI. // // This member is required. ClientSecret *string - // Supports the following OAuth grant types: Device Code and Refresh Token. - // Specify either of the following values, depending on the grant type that you - // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh - // Token - refresh_token For information about how to obtain the device code, see - // the StartDeviceAuthorization topic. + // Supports the following OAuth grant types: Authorization Code, Device Code, and + // Refresh Token. Specify one of the following values, depending on the grant type + // that you want: + // + // * Authorization Code - authorization_code + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token // // This member is required. GrantType *string // Used only when calling this API for the Authorization Code grant type. The - // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateToken API. + // short-lived code is used to identify this authorization request. Code *string - // Used only when calling this API for the Device Code grant type. This short-term - // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorization API. + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + + // Used only when calling this API for the Device Code grant type. 
This + // short-lived code is used to identify this authorization request. This comes from + // the result of the StartDeviceAuthorizationAPI. DeviceCode *string // Used only when calling this API for the Authorization Code grant type. This @@ -68,17 +76,19 @@ type CreateTokenInput struct { RedirectUri *string // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. + // used to refresh short-lived tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is requested. The access token that // is issued is limited to the scopes that are granted. If this value is not // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient . + // client during the call to RegisterClient. Scope []string noSmithyDocumentSerde @@ -86,7 +96,8 @@ type CreateTokenInput struct { type CreateTokenOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -94,18 +105,22 @@ type CreateTokenOutput struct { // The idToken is not implemented or supported. For more information about the // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A JSON Web Token (JWT) that identifies who is associated with the issued - // access token. + // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. + // + // A JSON Web Token (JWT) that identifies who is associated with the issued access + // token. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html IdToken *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used to notify the client that the returned token is an access token. 
The @@ -158,6 +173,9 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -170,6 +188,15 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { return err } @@ -191,6 +218,18 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index 6340953894..09f3647e8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -6,14 +6,15 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates and returns access and refresh tokens for clients and applications that // are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// short-lived credentials for the assigned Amazon Web Services accounts or to +// access application APIs using bearer authentication. func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { if params == nil { params = &CreateTokenWithIAMInput{} @@ -39,10 +40,15 @@ type CreateTokenWithIAMInput struct { // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending - // on the grant type that you want: * Authorization Code - authorization_code * - // Refresh Token - refresh_token * JWT Bearer - - // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - - // urn:ietf:params:oauth:grant-type:token-exchange + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange // // This member is required. GrantType *string @@ -54,28 +60,38 @@ type CreateTokenWithIAMInput struct { Assertion *string // Used only when calling this API for the Authorization Code grant type. 
This - // short-term code is used to identify this authorization request. The code is + // short-lived code is used to identify this authorization request. The code is // obtained through a redirect from IAM Identity Center to a redirect URI persisted // in the Authorization Code GrantOptions for the application. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Authorization Code grant type. This // value specifies the location of the client or application that has registered to // receive the authorization code. RedirectUri *string // Used only when calling this API for the Refresh Token grant type. This token is - // used to refresh short-term tokens, such as the access token, that might expire. + // used to refresh short-lived tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that the requester can receive. The following values - // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * - // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + // are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token RequestedTokenType *string // The list of scopes for which authorization is requested. The access token that @@ -94,8 +110,9 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token + // following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token SubjectTokenType *string noSmithyDocumentSerde @@ -103,9 +120,15 @@ type CreateTokenWithIAMInput struct { type CreateTokenWithIAMOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string + // A structure containing information from the idToken . Only the identityContext + // is in it, which is a value extracted from the idToken . This provides direct + // access to identity information without requiring JWT parsing. + AwsAdditionalDetails *types.AwsAdditionalDetails + // Indicates the time in seconds when an access token will expire. ExpiresIn int32 @@ -114,17 +137,21 @@ type CreateTokenWithIAMOutput struct { IdToken *string // Indicates the type of tokens that are issued by IAM Identity Center. 
The - // following values are supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token * Refresh Token - - // urn:ietf:params:oauth:token-type:refresh_token + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token IssuedTokenType *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is granted. The access token that is @@ -184,6 +211,9 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -196,6 +226,15 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { return err } @@ -217,6 +256,18 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 09f016ec1e..1e2d3828f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Registers a client with IAM Identity Center. This allows clients to initiate -// device authorization. The output should be persisted for reuse through many -// authentication requests. +// Registers a public client with IAM Identity Center. This allows clients to +// perform authorization using the authorization code grant with Proof Key for Code +// Exchange (PKCE) or the device code grant. 
func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { if params == nil { params = &RegisterClientInput{} @@ -41,6 +41,33 @@ type RegisterClientInput struct { // This member is required. ClientType *string + // This IAM Identity Center application ARN is used to define + // administrator-managed configuration for public client access to resources. At + // authorization, the scopes, grants, and redirect URI available to this client + // will be restricted by this application resource. + EntitledApplicationArn *string + + // The list of OAuth 2.0 grant types that are defined by the client. This list is + // used to restrict the token granting flows available to the client. Supports the + // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh + // Token. + // + // * Authorization Code - authorization_code + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + GrantTypes []string + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent can + // be redirected back to. + RedirectUris []string + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []string @@ -116,6 +143,9 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -128,6 +158,15 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterClientValidationMiddleware(stack); err != nil { return err } @@ -149,6 +188,18 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index c568805b22..de0108f1f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -30,22 +30,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type StartDeviceAuthorizationInput struct { // The unique identifier string for the client that is registered with 
IAM - // Identity Center. This value should come from the persisted result of the - // RegisterClient API operation. + // Identity Center. This value should come from the persisted result of the RegisterClientAPI + // operation. // // This member is required. ClientId *string // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClient API operation. + // the persisted result of the RegisterClientAPI operation. // // This member is required. ClientSecret *string - // The URL for the Amazon Web Services access portal. For more information, see - // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] // in the IAM Identity Center User Guide. // + // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + // // This member is required. StartUrl *string @@ -124,6 +125,9 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -136,6 +140,15 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { return err } @@ -157,6 +170,18 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index 40b3becb9f..e4b87f5bc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -8,11 +8,13 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +92,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options 
Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -163,7 +165,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -175,6 +180,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -234,7 +242,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -244,12 +255,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -265,6 +284,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -274,6 +294,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -294,9 +317,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", 
rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index 76a1160ece..93f3653d53 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -13,11 +13,22 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpCreateToken struct { } @@ -33,6 +44,10 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -72,6 +87,7 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -254,6 +270,10 @@ func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -293,6 +313,7 @@ func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -414,6 +435,11 @@ func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenW sv.AccessToken = ptr.String(jtv) } + case "awsAdditionalDetails": + if err := awsRestjson1_deserializeDocumentAwsAdditionalDetails(&sv.AwsAdditionalDetails, value); err != nil { + return err + } + case "expiresIn": if value != nil { jtv, ok := value.(json.Number) @@ -492,6 +518,10 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -531,6 +561,7 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -581,12 +612,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response case strings.EqualFold("InvalidClientMetadataException", errorCode): return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + case 
strings.EqualFold("InvalidRedirectUriException", errorCode): + return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) + case strings.EqualFold("InvalidRequestException", errorCode): return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) case strings.EqualFold("InvalidScopeException", errorCode): return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -705,6 +742,10 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -744,6 +785,7 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -1158,6 +1200,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res return output } +func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRedirectUriException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidRequestException{} var buff [1024]byte @@ -1472,6 +1550,46 @@ func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.Aut return nil } +func awsRestjson1_deserializeDocumentAwsAdditionalDetails(v **types.AwsAdditionalDetails, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AwsAdditionalDetails + if *v == nil { + sv = &types.AwsAdditionalDetails{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "identityContext": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdentityContext to be of type string, got %T instead", value) + } + sv.IdentityContext = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func 
awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1717,6 +1835,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran return nil } +func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRedirectUriException + if *v == nil { + sv = &types.InvalidRedirectUriException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index 53cd4f55a0..f3510b18c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -6,33 +6,44 @@ // IAM Identity Center OpenID Connect (OIDC) is a web service that enables a // client (such as CLI or a native application) to register with IAM Identity // Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. IAM -// Identity Center uses the sso and identitystore API namespaces. Considerations -// for Using This Guide Before you begin using this guide, we recommend that you -// first review the following important information about how the IAM Identity -// Center OIDC service works. +// upon successful authentication and authorization with IAM Identity Center. +// +// # API namespaces +// +// IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity +// Center OpenID Connect uses the sso-oidc namespace. +// +// # Considerations for using this guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. +// // - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ( -// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the CLI. +// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to +// enable single sign-on authentication with the CLI. +// // - With older versions of the CLI, the service only emits OIDC access tokens, // so to obtain a new token, users must explicitly re-authenticate. 
To access the // OIDC flow that supports token refresh and doesn’t require re-authentication, // update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with // support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see Configure Amazon Web Services access -// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) -// . +// durations. For more information, see [Configure Amazon Web Services access portal session duration]. +// // - The access tokens provided by this service grant access to all Amazon Web // Services account entitlements assigned to an IAM Identity Center user, not just // a particular application. +// // - The documentation in this guide does not describe the mechanism to convert // the access token into Amazon Web Services Auth (“sigv4”) credentials for use // with IAM-protected Amazon Web Services service endpoints. For more information, -// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. +// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. // -// For general information about IAM Identity Center, see What is IAM Identity -// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the IAM Identity Center User Guide. +// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity +// Center User Guide. +// +// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html +// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html +// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 +// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 94e835e711..6feea0c9fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -16,6 +16,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -288,6 +289,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -465,7 +477,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -491,6 +503,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -504,12 +519,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -531,5 +551,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index b2a52633ba..35f180975a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -25,11 +25,12 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index cbc7e8415f..04623412dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.4" +const goModuleVersion = "1.30.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index 843edb0742..ba7b4f9eb0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -87,6 +87,7 @@ func New() *Resolver { 
var partitionRegexp = struct { Aws *regexp.Regexp AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp AwsIsoE *regexp.Regexp @@ -94,8 +95,9 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), @@ -227,6 +229,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-4", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-5.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-5", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -235,6 +245,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -418,6 +436,27 @@ var defaultPartitions = endpoints.Partitions{ }, }, }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, { ID: "aws-iso", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index b964e7e109..55dd80d0e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,8 +52,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -64,23 +68,29 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. 
+ MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -95,10 +105,14 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +157,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go index 754218b78e..1ad103d1ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -10,6 +10,7 @@ import ( "github.com/aws/smithy-go/encoding/httpbinding" smithyjson "github.com/aws/smithy-go/encoding/json" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -23,6 +24,10 @@ func (*awsRestjson1_serializeOpCreateToken) ID() string { func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -66,6 +71,8 @@ func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { @@ -95,6 +102,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.DeviceCode != nil { ok := object.Key("deviceCode") ok.String(*v.DeviceCode) @@ -135,6 +147,10 @@ func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string { func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -178,6 +194,8 @@ func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error { @@ -207,6 +225,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithI ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.GrantType != nil { ok := object.Key("grantType") ok.String(*v.GrantType) @@ -257,6 +280,10 @@ func (*awsRestjson1_serializeOpRegisterClient) ID() string { func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := 
startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -300,6 +327,8 @@ func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { @@ -324,6 +353,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, ok.String(*v.ClientType) } + if v.EntitledApplicationArn != nil { + ok := object.Key("entitledApplicationArn") + ok.String(*v.EntitledApplicationArn) + } + + if v.GrantTypes != nil { + ok := object.Key("grantTypes") + if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { + return err + } + } + + if v.IssuerUrl != nil { + ok := object.Key("issuerUrl") + ok.String(*v.IssuerUrl) + } + + if v.RedirectUris != nil { + ok := object.Key("redirectUris") + if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { + return err + } + } + if v.Scopes != nil { ok := object.Key("scopes") if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { @@ -344,6 +397,10 @@ func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -387,6 +444,8 @@ func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { @@ -419,6 +478,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic return nil } +func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index 86b62049fd..2cfe7b48fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -188,7 +188,7 @@ func (e *InvalidClientMetadataException) ErrorCode() string { func (e *InvalidClientMetadataException) ErrorFault() 
smithy.ErrorFault { return smithy.FaultClient } // Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateToken request with an invalid grant type. +// makes a CreateTokenrequest with an invalid grant type. type InvalidGrantException struct { Message *string @@ -217,6 +217,36 @@ func (e *InvalidGrantException) ErrorCode() string { } func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRedirectUriException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRedirectUriException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRedirectUriException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Indicates that something is wrong with the input to the request. For example, a // required parameter might be missing or out of range. type InvalidRequestException struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go index 0ec0789f8d..2e8f3ea031 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -6,4 +6,17 @@ import ( smithydocument "github.com/aws/smithy-go/document" ) +// This structure contains Amazon Web Services-specific parameter extensions for +// the token endpoint responses and includes the identity context. +type AwsAdditionalDetails struct { + + // STS context assertion that carries a user identifier to the Amazon Web Services + // service that it calls and can be used to obtain an identity-enhanced IAM role + // session. This value corresponds to the sts:identity_context claim in the ID + // token. + IdentityContext *string + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 2fd5d5a649..6656137c7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,203 @@ +# v1.33.19 (2025-04-10) + +* No change notes available for this release. + +# v1.33.18 (2025-04-03) + +* No change notes available for this release. + +# v1.33.17 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.33.16 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.15 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.14 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.13 (2025-02-04) + +* No change notes available for this release. 
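The ssooidc/types additions above introduce the InvalidRedirectUriException error and the AwsAdditionalDetails structure. A hedged sketch of how a caller might surface the new error after a failed RegisterClient call (client construction elided; names are illustrative):

```go
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
)

// handleRegisterClientErr shows one way to detect the new
// InvalidRedirectUriException returned when a redirect URI is unsupported.
func handleRegisterClientErr(err error) {
	var badRedirect *types.InvalidRedirectUriException
	if errors.As(err, &badRedirect) {
		log.Printf("unsupported redirect URI: %s", badRedirect.ErrorMessage())
		return
	}
	if err != nil {
		log.Printf("RegisterClient failed: %v", err)
	}
}
```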
+ +# v1.33.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.33.9 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.33.8 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.7 (2025-01-14) + +* No change notes available for this release. + +# v1.33.6 (2025-01-10) + +* **Documentation**: Fixed typos in the descriptions. + +# v1.33.5 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.4 (2025-01-08) + +* No change notes available for this release. + +# v1.33.3 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.2 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.0 (2024-11-14) + +* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks. + +# v1.32.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.4 (2024-10-03) + +* No change notes available for this release. + +# v1.31.3 (2024-09-27) + +* No change notes available for this release. + +# v1.31.2 (2024-09-25) + +* No change notes available for this release. + +# v1.31.1 (2024-09-23) + +* No change notes available for this release. + +# v1.31.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.8 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.30.7 (2024-09-04) + +* No change notes available for this release. + +# v1.30.6 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.5 (2024-08-22) + +* No change notes available for this release. + +# v1.30.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. 
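Back in the ssooidc serializers earlier in this patch, each HandleSerialize now opens an "OperationSerializer" span plus a client.call.serialization_duration timer and closes both before delegating. A stripped-down sketch of that middleware shape, keeping only the tracing half (the type name is illustrative, not part of the SDK):

```go
package main

import (
	"context"

	"github.com/aws/smithy-go/middleware"
	"github.com/aws/smithy-go/tracing"
)

// tracedSerialize mirrors the span-per-serializer pattern from the generated
// code: start a span, defer its End, and delegate to the next handler.
type tracedSerialize struct{}

func (tracedSerialize) ID() string { return "tracedSerialize" }

func (tracedSerialize) HandleSerialize(
	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
) (middleware.SerializeOutput, middleware.Metadata, error) {
	ctx, span := tracing.StartSpan(ctx, "OperationSerializer")
	defer span.End()
	return next.HandleSerialize(ctx, in)
}
```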
+ +# v1.29.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2024-05-23) + +* No change notes available for this release. + +# v1.28.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.28.6 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 4d18dc86bd..fca363d2f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -4,6 +4,7 @@ package sts import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -15,25 +16,160 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "STS" const ServiceAPIVersion = "2011-06-15" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case "client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = 
append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts") +} + // Client provides the API client to make operations call for AWS Security Token // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
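The api_client.go hunk above adds the operationMetrics registry and the startMetricTimer helper that the serializers and invokeOperation rely on. The core of that helper is a record-once timer closure; a self-contained sketch of the idiom, independent of the smithy-go metrics types:

```go
package main

import (
	"fmt"
	"time"
)

// startTimer returns a closure that records the elapsed seconds exactly once,
// so it is safe to both defer it and call it explicitly on the success path,
// as the generated middleware does with endTimer().
func startTimer(record func(seconds float64)) func() {
	start := time.Now()
	var ended bool
	return func() {
		if ended {
			return
		}
		ended = true
		record(time.Since(start).Seconds())
	}
}

func main() {
	end := startTimer(func(s float64) { fmt.Printf("serialization took %.6fs\n", s) })
	defer end() // the deferred call becomes a no-op if end() already ran
	// ... do work ...
	end()
}
```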
Provide @@ -54,6 +190,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveEndpointResolverV2(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -72,6 +212,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -84,8 +226,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -109,15 +258,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ -155,7 +345,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != 
nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -429,6 +619,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -445,11 +659,36 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -488,6 +727,87 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + 
ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addRecursionDetection(stack *middleware.Stack) error { return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) } @@ -687,3 +1007,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + 
return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index e0e2c9c2e8..524e36eb61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,69 +16,98 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service -// with the following exception: You cannot call the Amazon Web Services STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: a role -// trust policy that specifies who can assume the role, and a permissions policy -// that specifies what can be done with the role. You specify the trusted principal -// that is allowed to assume the role in the role trust policy. To assume a role -// from a different account, your Amazon Web Services account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when the -// role is created. That trust policy states which accounts are allowed to delegate -// that access to users in the account. 
A user who wants to access a role in a -// different account must also have permissions that are delegated from the account -// administrator. The administrator must attach a policy that allows the user to -// call AssumeRole for the ARN of the role in the other account. To allow a user -// to assume a role in the same account, you can do either of the following: +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the +// IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: You +// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed session policies to this operation. +// You can pass a single JSON policy document to use as an inline session policy. +// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use +// as managed session policies. The plaintext that you use for both inline and +// managed session policies can't exceed 2,048 characters. Passing policies to this +// operation returns new temporary credentials. The resulting session's permissions +// are the intersection of the role's identity-based policy and the session +// policies. You can use the role's temporary credentials in subsequent Amazon Web +// Services API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies what +// can be done with the role. You specify the trusted principal that is allowed to +// assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have +// permissions that are delegated from the account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of the +// following: +// // - Attach a policy to the user that allows the user to call AssumeRole (as long // as the role's trust policy trusts the account). +// // - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the same // account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your -// session. These tags are called session tags. 
For more information about session -// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole . This is -// useful for cross-account scenarios to ensure that the user that assumes the role -// has been authenticated with an Amazon Web Services MFA device. In that scenario, -// the trust policy of the role being assumed includes a condition that tests for -// MFA authentication. If the caller does not include valid MFA information, the -// request to assume the role is denied. The condition in a trust policy that tests -// for MFA authentication might look like the following example. "Condition": -// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for -// the SerialNumber and TokenCode parameters. The SerialNumber value identifies -// the user's hardware or virtual MFA device. The TokenCode is the time-based -// one-time password (TOTP) that the MFA device produces. +// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM +// User Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information when +// you call AssumeRole . This is useful for cross-account scenarios to ensure that +// the user that assumes the role has been authenticated with an Amazon Web +// Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication might +// look like the following example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. +// +// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that the +// MFA device produces. +// +// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -101,17 +130,27 @@ type AssumeRoleInput struct { // This member is required. RoleArn *string - // An identifier for the assumed role session. Use the role session name to - // uniquely identify a session when the same role is assumed by different - // principals or for different reasons. In cross-account scenarios, the role - // session name is visible to, and can be logged by the account that owns the role. - // The role session name is also used in the ARN of the assumed role principal. - // This means that subsequent cross-account API requests that use the temporary - // security credentials will expose the role session name to the external account - // in their CloudTrail logs. The regex used to validate this parameter is a string - // of characters consisting of upper- and lower-case alphanumeric characters with - // no spaces. You can also include underscores or any of the following characters: - // =,.@- + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role is + // assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the account + // that owns the role. The role session name is also used in the ARN of the assumed + // role principal. This means that subsequent cross-account API requests that use + // the temporary security credentials will expose the role session name to the + // external account in their CloudTrail logs. + // + // For security purposes, administrators can view this field in [CloudTrail logs] to help identify + // who performed an action in Amazon Web Services. Your administrator might require + // that you specify your user name as the session name when you assume the role. + // For more information, see [sts:RoleSessionName]sts:RoleSessionName . 
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds + // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // // This member is required. RoleSessionName *string @@ -122,23 +161,27 @@ type AssumeRoleInput struct { // hours. If you specify a value higher than this setting or the administrator // setting (whichever is lower), the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web - // Services CLI or Amazon Web Services API role session to a maximum of one hour. - // When you use the AssumeRole API operation to assume a role, you can specify the - // duration of your role session with the DurationSeconds parameter. You can - // specify a parameter value of up to 43200 seconds (12 hours), depending on the - // maximum session duration setting for your role. However, if you assume a role - // using role chaining and provide a DurationSeconds parameter value greater than - // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API + // role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of up to + // 43200 seconds (12 hours), depending on the maximum session duration setting for + // your role. However, if you assume a role using role chaining and provide a + // DurationSeconds parameter value greater than one hour, the operation fails. To + // learn how to view the maximum value for your role, see [Update the maximum session duration for a role]. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
+ // + // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // A unique identifier that might be required when you assume a role in another @@ -149,63 +192,82 @@ type AssumeRoleInput struct { // the administrator of the trusting account might send an external ID to the // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about - // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@:/- + // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + // + // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html ExternalId *string // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. 
For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // For more information about role session permissions, see [Session policies]. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. 
Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of previously acquired trusted context assertions in the format of a // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. The following is an example of a ProvidedContext value that - // includes a single trusted context assertion and the ARN of the context provider - // from which the trusted context assertion was generated. - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + // Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which the + // trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []types.ProvidedContext // The identification number of the MFA device that is associated with the user @@ -213,79 +275,99 @@ type AssumeRoleInput struct { // the role being assumed includes a condition that requires MFA authentication. // The value is either the serial number for a hardware device (such as // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter - // is a string of characters consisting of upper- and lower-case alphanumeric - // characters with no spaces. You can also include underscores or any of the - // following characters: =,.@- + // arn:aws:iam::123456789012:mfa/user ). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. 
For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws: . This prefix is - // reserved for Amazon Web Services internal use. + // operation. The source identity value persists across [chained role]sessions. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: +=,.@-. You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters, and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the role. When you do, session - // tags override a role tag with the same key. Tag key–value pairs are not case - // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. 
Assume that the role has the Department = - // Marketing tag and you pass the department = engineering session tag. Department - // and department are not saved as separate tags, and the session tag passed in - // the request takes precedence over the role tag. Additionally, if you used - // temporary credentials to perform this operation, the new session inherits any - // transitive session tags from the calling session. If you pass a session tag with - // the same key as an inherited tag, the operation fails. To view the inherited - // tags for a session, see the CloudTrail logs. For more information, see Viewing - // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // Additionally, if you used temporary credentials to perform this operation, the + // new session inherits any transitive session tags from the calling session. If + // you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. For + // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. + // + // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs Tags []types.Tag // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA. (In other words, if the policy includes a condition that // tests for MFA). If the role being assumed requires MFA and if the TokenCode // value is missing or expired, the AssumeRole call returns an "access denied" - // error. The format for this parameter, as described by its regex pattern, is a - // sequence of six numeric digits. + // error. 
+ // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string // A list of keys for session tags that you want to set as transitive. If you set // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. When you set session tags as - // transitive, the session policy and session tags packed binary limit is not - // affected. If you choose not to specify a transitive tag key, then no tags are - // passed from this session to any subsequent sessions. + // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. + // + // This parameter is optional. The transitive status of a session tag does not + // impact its packed binary size. + // + // If you choose not to specify a transitive tag key, then no tags are passed from + // this session to any subsequent sessions. + // + // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining TransitiveTagKeys []string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. +// Contains the response to a successful AssumeRole request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -296,9 +378,10 @@ type AssumeRoleOutput struct { AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -308,17 +391,21 @@ type AssumeRoleOutput struct { PackedPolicySize *int32 // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. 
You can also include underscores or any of the following characters: - // =,.@- + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // Metadata pertaining to the operation's result. @@ -370,6 +457,9 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -382,6 +472,15 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { return err } @@ -403,6 +502,18 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index 2a57b72ac9..400f809e30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,92 +16,132 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. // For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. 
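As context for the AssumeRole hunks above, here is a minimal sketch of calling AssumeRole through the v2 STS client that this vendor bump pulls in. It is illustrative only: the role ARN, session name, and tag values are placeholders rather than values from this change, and the optional fields simply mirror the parameters documented in the hunks (DurationSeconds, Tags, and the packed-size caveats still apply).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the environment and shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Placeholder role ARN and session name; not values taken from this patch.
	out, err := client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int32(3600),
		// Optional session tags; subject to the plaintext and packed-size
		// limits described in the documentation above.
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("assumed role:", aws.ToString(out.AssumedRoleUser.Arn))
	fmt.Println("expires:", aws.ToTime(out.Credentials.Expiration))
}

The same input struct also carries Policy, PolicyArns, SerialNumber, TokenCode, SourceIdentity, and TransitiveTagKeys for the session-policy, MFA, and role-chaining cases documented above.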
The temporary security credentials returned by this -// operation consist of an access key ID, a secret access key, and a security -// token. Applications can use these temporary security credentials to sign calls -// to Amazon Web Services services. Session Duration By default, the temporary -// security credentials created by AssumeRoleWithSAML last for one hour. However, -// you can use the optional DurationSeconds parameter to specify the duration of -// your session. Your role session lasts for the duration that you specify, or -// until the time specified in the SAML authentication response's -// SessionNotOnOrAfter value, whichever is shorter. You can provide a -// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour to 12 -// hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of an +// access key ID, a secret access key, and a security token. Applications can use +// these temporary security credentials to sign calls to Amazon Web Services +// services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML +// authentication response's SessionNotOnOrAfter value, whichever is shorter. You +// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the +// maximum session duration setting for the role. This setting can have a value +// from 1 hour to 12 hours. To learn how to view the maximum value for your role, +// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console URL. +// For more information, see [Using IAM Roles]in the IAM User Guide. +// +// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. // You can specify a parameter value of up to 43200 seconds (12 hours), depending // on the maximum session duration setting for your role. 
However, if you assume a // role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. Permissions The temporary security -// credentials created by AssumeRoleWithSAML can be used to make API calls to any -// Amazon Web Services service with the following exception: you cannot call the -// STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of -// Amazon Web Services security credentials. The identity of the caller is -// validated by using keys in the metadata document that is uploaded for the SAML -// provider entity for your identity provider. Calling AssumeRoleWithSAML can -// result in an entry in your CloudTrail logs. The entry includes the value in the -// NameID element of the SAML assertion. We recommend that you use a NameIDType -// that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier ( -// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can -// configure your IdP to pass attributes into your SAML assertion as session tags. -// Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, session tags -// override the role's tags with the same key. 
An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to -// issue the claims required by Amazon Web Services. Additionally, you must use -// Identity and Access Management (IAM) to create a SAML provider entity in your -// Amazon Web Services account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// than one hour, the operation fails. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used to +// make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys in +// the metadata document that is uploaded for the SAML provider entity for your +// identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The +// entry includes the value in the NameID element of the SAML assertion. We +// recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the +// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML +// assertion as session tags. Each session tag consists of a key name and an +// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. 
For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, session tags override the role's tags with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML , you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web +// Services. Additionally, you must use Identity and Access Management (IAM) to +// create a SAML provider entity in your Amazon Web Services account that +// represents your identity provider. You must also create an IAM role that +// specifies this SAML provider in its trust policy. +// // For more information, see the following resources: -// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// +// [About SAML 2.0-based Federation] +// - in the IAM User Guide. +// +// [Creating SAML Identity Providers] +// - in the IAM User Guide. +// +// [Configuring a Relying Party and Claims] +// - in the IAM User Guide. +// +// [Creating a Role for SAML 2.0 Federation] +// - in the IAM User Guide. 
+// +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html +// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -130,9 +170,11 @@ type AssumeRoleWithSAMLInput struct { // This member is required. RoleArn *string - // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. + // + // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html // // This member is required. SAMLAssertion *string @@ -146,92 +188,117 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. 
The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // For more information about role session permissions, see [Session policies]. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. 
The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithSAMLOutput struct { // The identifiers for the temporary security credentials that the operation // returns. AssumedRoleUser *types.AssumedRoleUser - // The value of the Recipient attribute of the SubjectConfirmationData element of + // The value of the Recipient attribute of the SubjectConfirmationData element of // the SAML assertion. Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // The value of the Issuer element of the SAML assertion. Issuer *string // A hash value based on the concatenation of the following: + // // - The Issuer response value. + // // - The Amazon Web Services account ID. + // // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. The following pseudocode shows how the hash value is calculated: BASE64 ( - // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -240,31 +307,37 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can - // require users to set a source identity value when they assume a role. You do - // this by using the sts:SourceIdentity condition key in a role trust policy. That - // way, actions that are taken with the role are associated with that user. After - // the source identity is set, the value cannot be changed. 
It is present in the - // request for all actions that are taken by the role and persists across chained - // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML - // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // The value in the SourceIdentity attribute in the SAML assertion. The source + // identity value persists across [chained role]sessions. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your SAML identity provider to use an + // attribute associated with your users, like user name or email, as the source + // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to + // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in + // the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // The value of the NameID element in the Subject element of the SAML assertion. Subject *string - // The format of the name ID, as defined by the Format attribute in the NameID + // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent . If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // persistent . + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , + // that prefix is removed. For example, // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . // If the format includes any other prefix, the format is returned with no // modifications. 
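The NameQualifier documentation above gives its derivation only as pseudocode. Below is a small sketch of that calculation, assuming it is SHA-1 over the concatenation of the Issuer value, the Amazon Web Services account ID, and the slash-prefixed friendly name of the SAML provider, base64-encoded as in the documented example; it is not an official SDK helper.

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// nameQualifier mirrors the documented pseudocode:
// BASE64( SHA1( issuer + accountID + "/" + providerName ) ).
func nameQualifier(issuer, accountID, providerName string) string {
	sum := sha1.Sum([]byte(issuer + accountID + "/" + providerName))
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// Values taken from the example in the documentation above.
	fmt.Println(nameQualifier("https://example.com/saml", "123456789012", "MySAMLIdP"))
}

As the documentation notes, combining NameQualifier with Subject gives a stable per-user identifier across SAML sessions.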
@@ -316,6 +389,9 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -328,6 +404,15 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { return err } @@ -349,6 +434,18 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index 98108ce6af..e5708cbd1d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,105 +14,132 @@ import ( // Returns a set of temporary security credentials for users who have been // authenticated in a mobile or web application with a web identity provider. // Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// . For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. To learn more about Amazon -// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not -// require the use of Amazon Web Services security credentials. Therefore, you can -// distribute an application (for example, on mobile devices) that requests -// temporary security credentials without including long-term Amazon Web Services -// credentials in the application. You also don't need to deploy server-based proxy -// services that use long-term Amazon Web Services credentials. Instead, the -// identity of the caller is validated by using a token from the web identity -// provider. 
For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this API -// consist of an access key ID, a secret access key, and a security token. -// Applications can use these temporary security credentials to sign calls to -// Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for -// one hour. However, you can use the optional DurationSeconds parameter to -// specify the duration of your session. You can provide a value from 900 seconds -// (15 minutes) up to the maximum session duration setting for the role. This -// setting can have a value from 1 hour to 12 hours. To learn how to view the -// maximum value for your role, see View the Maximum Session Duration Setting for -// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web -// Services service with the following exception: you cannot call the STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass -// attributes into your web identity token as session tags. Each session tag -// consists of a key name and an associated value. For more information about -// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. 
The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, the session tag -// overrides the role tag with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity , you must have an identity token from a supported -// identity provider and create a role that the application can assume. The role -// that your application assumes must trust the identity provider that is -// associated with the identity token. In other words, the identity provider must -// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you could -// instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) -// . For more information about how to use web identity federation and the +// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also +// supply the user with a consistent identity throughout the lifetime of an +// application. +// +// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. Therefore, you can distribute an application (for +// example, on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. 
You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service API +// operations. +// +// # Session Duration +// +// By default, the temporary security credentials created by +// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional +// DurationSeconds parameter to specify the duration of your session. You can +// provide a value from 900 seconds (15 minutes) up to the maximum session duration +// setting for the role. This setting can have a value from 1 hour to 12 hours. To +// learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide. +// The maximum session duration limit applies when you use the AssumeRole* API +// operations or the assume-role* CLI commands. However the limit does not apply +// when you use those operations to create a console URL. For more information, see +// [Using IAM Roles]in the IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can be +// used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken API +// operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. 
Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, the session tag overrides the role tag with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity , you must have an +// identity token from a supported identity provider and create a role that the +// application can assume. The role that your application assumes must trust the +// identity provider that is associated with the identity token. In other words, +// the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the [Subject]of the provided web identity token. We recommend +// that you avoid using any personally identifiable information (PII) in this +// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. +// +// For more information about how to use OIDC federation and the // AssumeRoleWithWebIdentity API, see the following resources: -// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// . -// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) -// . Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then using -// those credentials to make a request to Amazon Web Services. -// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// . These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these +// +// [Using Web Identity Federation API Operations for Mobile Apps] +// - and [Federation Through a Web-based Identity Provider]. +// +// [Amazon Web Services SDK for iOS Developer Guide] +// - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the +// identity providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. -// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) -// . 
This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. +// +// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ +// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration +// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html +// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -132,6 +159,17 @@ type AssumeRoleWithWebIdentityInput struct { // The Amazon Resource Name (ARN) of the role that the caller is assuming. // + // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles]. + // The trust policies of these roles must accept the cognito-identity.amazonaws.com + // service principal and must contain the cognito-identity.amazonaws.com:aud + // condition key to restrict role assumption to users from your intended identity + // pools. A policy that trusts Amazon Cognito identity pools without this condition + // creates a risk that a user from an unintended identity pool can assume the role. + // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication]in the Amazon Cognito Developer Guide. 
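As a usage sketch for the AssumeRoleWithWebIdentity operation documented above: the following minimal, illustrative Go example exchanges a web identity token for temporary credentials with the v2 client. The role ARN, session name, and token value are placeholders, and the call itself is unsigned, so no long-term credentials need to be embedded in the application.

// Minimal sketch: exchange an OIDC ID token for temporary credentials.
// The role ARN and token shown here are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// AssumeRoleWithWebIdentity does not require signed credentials,
	// but loading the default config still resolves the Region.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	idToken := "eyJ..." // OIDC ID token obtained from the identity provider (placeholder)

	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-session-for-user-42"),
		WebIdentityToken: aws.String(idToken),
		DurationSeconds:  aws.Int32(3600), // must stay within the role's maximum session duration
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned credentials are temporary; make no assumption about token size.
	fmt.Println("access key:", aws.ToString(out.Credentials.AccessKeyId))
	fmt.Println("expires:   ", out.Credentials.Expiration)
}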
+ // + // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html + // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies + // // This member is required. RoleArn *string @@ -139,10 +177,19 @@ type AssumeRoleWithWebIdentityInput struct { // identifier that is associated with the user who is using your application. That // way, the temporary security credentials that your application will use are // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. The regex used to - // validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@- + // assumed role ID in the AssumedRoleUser response element. + // + // For security purposes, administrators can view this field in [CloudTrail logs] to help identify + // who performed an action in Amazon Web Services. Your administrator might require + // that you specify your user name as the session name when you assume the role. + // For more information, see [sts:RoleSessionName]sts:RoleSessionName . + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds + // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname // // This member is required. RoleSessionName *string @@ -150,8 +197,10 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the // identity provider. Your application must get this token by authenticating the // user who is using your application with a web identity provider before the - // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA - // algorithms (RS256) are supported. + // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token + // must be formatted as either an integer or a long integer. Tokens must be signed + // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or + // ES512). // // This member is required. WebIdentityToken *string @@ -162,73 +211,93 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. 
The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // For more information about role session permissions, see [Session policies]. 
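A small sketch of scoping the session down with the Policy and PolicyArns parameters follows; the inline policy JSON and managed policy ARN are placeholders, and the PackedPolicySize check reflects the packed-size limit described above.

// Sketch: restrict a web-identity session with an inline session policy and a
// managed session policy ARN, then report how close the packed size is to the limit.
package stsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func assumeScopedSession(ctx context.Context, client *sts.Client, roleArn, token string) error {
	// Inline session policy (placeholder): the effective permissions are the
	// intersection of the role's identity-based policy and this document.
	sessionPolicy := `{
	  "Version": "2012-10-17",
	  "Statement": [{"Effect": "Allow", "Action": "s3:GetObject", "Resource": "arn:aws:s3:::example-bucket/*"}]
	}`

	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String(roleArn),
		RoleSessionName:  aws.String("scoped-session"),
		WebIdentityToken: aws.String(token),
		Policy:           aws.String(sessionPolicy),
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::123456789012:policy/ExampleSessionPolicy")}, // placeholder ARN
		},
	})
	if err != nil {
		return err
	}

	// PackedPolicySize reports, as a percentage, how close the compressed
	// policies and tags are to the packed-size limit.
	if out.PackedPolicySize != nil {
		fmt.Printf("packed policy size: %d%% of limit\n", *out.PackedPolicySize)
	}
	return nil
}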
+ // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. 
You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // The fully qualified host component of the domain name of the OAuth 2.0 identity // provider. Do not specify this value for an OpenID Connect identity provider. + // // Currently www.amazon.com and graph.facebook.com are the only supported identity // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. Do not specify this value for OpenID Connect ID tokens. + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. ProviderId *string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. +// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithWebIdentityOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -244,9 +313,10 @@ type AssumeRoleWithWebIdentityOutput struct { Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. The size of the security token that STS API - // operations return is not fixed. We strongly recommend that you make no - // assumptions about the maximum size. + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -255,30 +325,34 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect + // The issuing authority of the web identity token presented. For OpenID Connect // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed in // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. You can require users to set a source identity value - // when they assume a role. You do this by using the sts:SourceIdentity condition - // key in a role trust policy. That way, actions that are taken with the role are - // associated with that user. After the source identity is set, the value cannot be - // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. 
You can configure your identity provider to use an attribute + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon + // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string // The unique user identifier that is returned by the identity provider. 
This @@ -335,6 +409,9 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -347,6 +424,15 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { return err } @@ -368,6 +454,18 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go new file mode 100644 index 0000000000..a0f7a46713 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go @@ -0,0 +1,223 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of short term credentials you can use to perform privileged tasks +// on a member account in your organization. +// +// Before you can launch a privileged session, you must have centralized root +// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts]in the IAM +// User Guide. +// +// The STS global endpoint is not supported for AssumeRoot. You must send this +// request to a Regional STS endpoint. For more information, see [Endpoints]. +// +// You can track AssumeRoot in CloudTrail logs to determine what actions were +// performed in a session. For more information, see [Track privileged tasks in CloudTrail]in the IAM User Guide. +// +// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints +// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html +// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html +func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) { + if params == nil { + params = &AssumeRootInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRootOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRootInput struct { + + // The member account principal ARN or account ID. 
+ // + // This member is required. + TargetPrincipal *string + + // The identity based policy that scopes the session to the privileged tasks that + // can be performed. You can use one of following Amazon Web Services managed + // policies to scope root session actions. + // + // [IAMAuditRootUserCredentials] + // + // [IAMCreateRootUserPassword] + // + // [IAMDeleteRootUserCredentials] + // + // [S3UnlockBucketPolicy] + // + // [SQSUnlockQueuePolicy] + // + // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials + // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword + // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials + // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy + // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy + // + // This member is required. + TaskPolicyArn *types.PolicyDescriptorType + + // The duration, in seconds, of the privileged session. The value can range from 0 + // seconds up to the maximum session duration of 900 seconds (15 minutes). If you + // specify a value higher than this setting, the operation fails. + // + // By default, the value is set to 900 seconds. + DurationSeconds *int32 + + noSmithyDocumentSerde +} + +type AssumeRootOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. + Credentials *types.Credentials + + // The source identity specified by the principal that is calling the AssumeRoot + // operation. + // + // You can use the aws:SourceIdentity condition key to control access based on the + // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + + // Metadata pertaining to the operation's result. 
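A minimal sketch of the new AssumeRoot operation follows. The member account ID is a placeholder, the task policy ARN is an assumed form of one of the managed policies listed above, and the client is pinned to a Region because the STS global endpoint is not supported for this call.

// Sketch: launch a privileged root session on a member account. Requires
// centralized root access to be enabled in the organization. Account ID and
// task policy ARN below are placeholders/assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()

	// Pin a Region so the request goes to a Regional STS endpoint.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRoot(ctx, &sts.AssumeRootInput{
		TargetPrincipal: aws.String("111122223333"), // member account ID (placeholder)
		TaskPolicyArn: &types.PolicyDescriptorType{
			// Assumed ARN form of the IAMAuditRootUserCredentials managed policy; verify the exact ARN.
			Arn: aws.String("arn:aws:iam::aws:policy/root-task/IAMAuditRootUserCredentials"),
		},
		DurationSeconds: aws.Int32(900), // privileged sessions last at most 900 seconds
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("privileged session expires:", out.Credentials.Expiration)
}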
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpAssumeRootValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "AssumeRoot", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index b4ad54ab2f..9e7cb17d36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -11,28 +11,39 @@ import ( ) // Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. For -// example, if a user is not authorized to perform an operation that he or she has -// requested, the request returns a Client.UnauthorizedOperation response (an HTTP -// 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. Only -// certain Amazon Web Services operations return an encoded authorization message. -// The documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. The message is -// encoded because the details of the authorization status can contain privileged -// information that the user who requested the operation should not see. To decode -// an authorization status message, a user must be granted permissions through an -// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) -// action. The decoded message includes the following type of information: +// an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he or she +// has requested, the request returns a Client.UnauthorizedOperation response (an +// HTTP 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether that +// operation returns an encoded message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation should +// not see. To decode an authorization status message, a user must be granted +// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( +// sts:DecodeAuthorizationMessage ) action. +// +// The decoded message includes the following type of information: +// // - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether a -// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User +// Guide. +// // - The principal who made the request. +// // - The requested action. +// // - The requested resource. +// // - The values of condition keys in the context of the user's request. 
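A short sketch of decoding such a message follows; the encoded message is assumed to have been captured from a Client.UnauthorizedOperation (HTTP 403) response, and the caller must be allowed the sts:DecodeAuthorizationMessage action.

// Sketch: decode the encoded authorization message returned with an
// authorization failure. encodedMsg is a placeholder value.
package stsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func decodeDenial(ctx context.Context, client *sts.Client, encodedMsg string) error {
	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encodedMsg),
	})
	if err != nil {
		return err
	}
	// The decoded JSON describes whether the denial was explicit or implicit,
	// plus the principal, action, resource, and condition-key context.
	fmt.Println(aws.ToString(out.DecodedMessage))
	return nil
}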
+// +// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow +// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} @@ -115,6 +126,9 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +141,15 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { return err } @@ -148,6 +171,18 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index 1f7cbcc2bb..28c05f13bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -10,23 +10,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and -// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). -// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. When you pass an access key ID to this operation, it -// returns the ID of the Amazon Web Services account to which the keys belong. -// Access key IDs beginning with AKIA are long-term credentials for an IAM user or -// the Amazon Web Services account root user. Access key IDs beginning with ASIA -// are temporary credentials that are created using STS operations. If the account -// in the response belongs to you, you can sign in as the root user and review your -// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. 
To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. This operation does not indicate the state of the access -// key. The key might be active, inactive, or deleted. Active keys might not have -// permissions to perform an operation. Providing a deleted access key might return -// an error that the key doesn't exist. +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, +// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, +// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access +// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs to +// you, you can sign in as the root user and review your root user access keys. +// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who +// requested the temporary credentials for an ASIA access key, view the STS events +// in your [CloudTrail logs]in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might be +// active, inactive, or deleted. Active keys might not have permissions to perform +// an operation. Providing a deleted access key might return an error that the key +// doesn't exist. +// +// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html +// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html +// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { if params == nil { params = &GetAccessKeyInfoInput{} @@ -44,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI type GetAccessKeyInfoInput struct { - // The identifier of an access key. This parameter allows (through its regex - // pattern) a string of characters that can consist of any upper- or lowercase - // letter or digit. + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters that + // can consist of any upper- or lowercase letter or digit. // // This member is required. 
AccessKeyId *string @@ -108,6 +117,9 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -120,6 +132,15 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { return err } @@ -141,6 +162,18 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index acb7ede44f..de137b7dc4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -12,13 +12,15 @@ import ( ) // Returns details about the IAM user or role whose credentials are used to call -// the operation. No permissions are required to perform this operation. If an -// administrator attaches a policy to your identity that explicitly denies access -// to the sts:GetCallerIdentity action, you can still perform this operation. -// Permissions are not required because the same information is returned when -// access is denied. To view an example response, see I Am Not Authorized to -// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. +// the operation. +// +// No permissions are required to perform this operation. If an administrator +// attaches a policy to your identity that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when access is denied. +// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. +// +// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { params = &GetCallerIdentityInput{} @@ -38,8 +40,8 @@ type GetCallerIdentityInput struct { noSmithyDocumentSerde } -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. 
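A combined sketch of the two diagnostic calls above follows: GetAccessKeyInfo maps an access key ID to its owning account, and GetCallerIdentity reports the identity behind the current credentials. The access key ID shown is the standard documentation placeholder.

// Sketch: account lookup for an access key ID, then a self-identity probe.
package stsexamples

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func whoAmI(ctx context.Context, client *sts.Client) error {
	keyInfo, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"), // documentation placeholder key ID
	})
	if err != nil {
		return err
	}
	fmt.Println("key owned by account:", aws.ToString(keyInfo.Account))

	// GetCallerIdentity needs no extra permissions, so it is a safe probe.
	ident, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		return err
	}
	fmt.Println("caller ARN:", aws.ToString(ident.Arn))
	fmt.Println("account:   ", aws.ToString(ident.Account))
	fmt.Println("user id:   ", aws.ToString(ident.UserId))
	return nil
}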
+// Contains the response to a successful GetCallerIdentity request, including information about the +// entity making the request. type GetCallerIdentityOutput struct { // The Amazon Web Services account ID number of the account that owns or contains @@ -51,8 +53,10 @@ type GetCallerIdentityOutput struct { // The unique identifier of the calling entity. The exact value depends on the // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. + // the aws:userid column in the [Principal table]found on the Policy Variables reference page in + // the IAM User Guide. + // + // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable UserId *string // Metadata pertaining to the operation's result. @@ -104,6 +108,9 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -116,6 +123,15 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { return err } @@ -134,6 +150,18 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index 3679618cb5..67c041b30e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -14,74 +14,100 @@ import ( // Returns a set of temporary security credentials (consisting of an access key // ID, a secret access key, and a security token) for a user. A typical use is in a // proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the -// GetFederationToken operation using the long-term security credentials of an IAM -// user. As a result, this call is appropriate in contexts where those credentials -// can be safeguarded, usually in a server-based application. 
For a comparison of -// GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Although it is possible to call GetFederationToken using -// the security credentials of an Amazon Web Services account root user rather than -// an IAM user that you create for the purpose of a proxy application, we do not -// recommend it. For more information, see Safeguard your root user credentials -// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. Session duration The temporary credentials are valid for -// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the root user credentials have a maximum -// duration of 3,600 seconds (1 hour). Permissions You can use the temporary -// credentials created by GetFederationToken in any Amazon Web Services service -// with the following exceptions: +// distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based +// application. For a comparison of GetFederationToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security +// credentials of an Amazon Web Services account root user rather than an IAM user +// that you create for the purpose of a proxy application, we do not recommend it. +// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). 
Temporary credentials obtained by +// using the root user credentials have a maximum duration of 3,600 seconds (1 +// hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM operations using the CLI or the Amazon Web Services // API. This limitation does not apply to console sessions. +// // - You cannot call any STS operations except GetCallerIdentity . // -// You can use temporary credentials for single sign-on (SSO) to the console. You -// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed [session policy] to this operation. You can pass a single +// JSON policy document to use as an inline session policy. You can also specify up +// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session +// policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. +// // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass // session policies, the session permissions are the intersection of the IAM user // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to create -// temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) -// . You can use the credentials to access a resource that has a resource-based +// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For +// information about using GetFederationToken to create temporary security +// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. +// +// You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by -// the session policies. Tags (Optional) You can pass tag key-value pairs to your -// session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. 
You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is -// preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the Department = -// Marketing tag and you pass the department = engineering session tag. Department -// and department are not saved as separate tags, and the session tag passed in -// the request takes precedence over the user tag. +// the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This means +// that you cannot have separate Department and department tag keys. Assume that +// the user that you are federating has the Department = Marketing tag and you +// pass the department = engineering session tag. Department and department are +// not saved as separate tags, and the session tag passed in the request takes +// precedence over the user tag. 
+// +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito]: http://aws.amazon.com/cognito/ +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -102,10 +128,11 @@ type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the // temporary security credentials (such as Bob ). For example, you can reference // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. Name *string @@ -119,99 +146,127 @@ type GetFederationTokenInput struct { DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. This parameter is - // optional. However, if you do not pass any session policies, then the resulting - // federated user session has no permissions. When you pass session policies, the - // session permissions are the intersection of the IAM user policies and the - // session policies that you pass. This gives you a way to further restrict the - // permissions for a federated user. You cannot use session policies to grant more - // permissions than those that are defined in the permissions policy of the IAM - // user. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // The plaintext that you use for both inline and managed session policies can't // exceed 2,048 characters. The JSON policy characters can be any ASCII character // from the space character to the end of the valid character list (\u0020 through // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. An Amazon Web Services conversion compresses the - // passed inline session policy, managed policy ARNs, and session tags into a - // packed binary format that has a separate limit. Your request can fail for this - // limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. - Policy *string - - // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to - // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. The plaintext that you - // use for both inline and managed session policies can't exceed 2,048 characters. - // You can provide up to 10 managed policy ARNs. 
For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. This parameter is optional. - // However, if you do not pass any session policies, then the resulting federated - // user session has no permissions. When you pass session policies, the session - // permissions are the intersection of the IAM user policies and the session - // policies that you pass. This gives you a way to further restrict the permissions - // for a federated user. You cannot use session policies to grant more permissions - // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // return (\u000D) characters. + // // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext // meets the other requirements. The PackedPolicySize response element indicates // by percentage how close the policies and tags for your request are to the upper // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. The plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. You can provide up to 10 managed policy + // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General + // Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. 
If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see Passing Session - // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the user you are federating. When - // you do, session tags override a user tag with the same key. Tag key–value pairs - // are not case sensitive, but case is preserved. This means that you cannot have - // separate Department and department tag keys. Assume that the role has the - // Department = Marketing tag and you pass the department = engineering session - // tag. Department and department are not saved as separate tags, and the session - // tag passed in the request takes precedence over the role tag. + // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User + // Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user tag + // with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length Tags []types.Tag noSmithyDocumentSerde } -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetFederationToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as @@ -275,6 +330,9 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -287,6 +345,15 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { return err } @@ -308,6 +375,18 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 751fb147d4..903d151ce2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -15,43 +15,58 @@ import ( // IAM user. The credentials consist of an access key ID, a secret access key, and // a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and -// submit an MFA code that is associated with their MFA device. Using the temporary -// security credentials that the call returns, IAM users can then make programmatic -// calls to API operations that require MFA authentication. An incorrect MFA code -// causes the API to return an access denied error. For a comparison of -// GetSessionToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. No permissions are required for users to perform this -// operation. The purpose of the sts:GetSessionToken operation is to authenticate -// the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) -// in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of an IAM -// user. Credentials that are created by IAM users are valid for the duration that -// you specify. 
This duration can range from 900 seconds (15 minutes) up to a -// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 -// hours). Credentials based on account credentials can range from 900 seconds (15 -// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The -// temporary security credentials created by GetSessionToken can be used to make -// API calls to any Amazon Web Services service with the following exceptions: +// Amazon EC2 StopInstances . +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is +// associated with their MFA device. Using the temporary security credentials that +// the call returns, IAM users can then make programmatic calls to API operations +// that require MFA authentication. An incorrect MFA code causes the API to return +// an access denied error. For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose of +// the sts:GetSessionToken operation is to authenticate the user using MFA. You +// cannot use policies to control authentication operations. For more information, +// see [Permissions for GetSessionToken]in the IAM User Guide. +// +// # Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon Web +// Services security credentials of an IAM user. Credentials that are created by +// IAM users are valid for the duration that you specify. This duration can range +// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), +// with a default of 43,200 seconds (12 hours). Credentials based on account +// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 +// hour), with a default of 1 hour. +// +// # Permissions +// +// The temporary security credentials created by GetSessionToken can be used to +// make API calls to any Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM API operations unless MFA authentication // information is included in the request. +// // - You cannot call any STS API except AssumeRole or GetCallerIdentity . // // The credentials that GetSessionToken returns are based on permissions // associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. Although it -// is possible to call GetSessionToken using the security credentials of an Amazon -// Web Services account root user rather than an IAM user, we do not recommend it. -// If GetSessionToken is called using root user credentials, the temporary -// credentials have root user permissions. For more information, see Safeguard -// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide For more information about using GetSessionToken to -// create temporary credentials, see Temporary Credentials for Users in Untrusted -// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. +// The temporary credentials have the same permissions as the IAM user. 
+// +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do not +// recommend it. If GetSessionToken is called using root user credentials, the +// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in +// the IAM User Guide +// +// For more information about using GetSessionToken to create temporary +// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. +// +// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html +// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} @@ -83,10 +98,11 @@ type GetSessionTokenInput struct { // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. The regex used - // to validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@:/- + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- SerialNumber *string // The value provided by the MFA device, if MFA is required. If any policy @@ -94,22 +110,24 @@ type GetSessionTokenInput struct { // authentication is required, the user must provide a code when requesting a set // of temporary security credentials. A user who fails to provide the code receives // an "access denied" response when requesting resources that require MFA - // authentication. The format for this parameter, as described by its regex - // pattern, is a sequence of six numeric digits. + // authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string noSmithyDocumentSerde } -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetSessionToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. 
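(Illustrative aside, not part of the vendored patch.) The SerialNumber and TokenCode fields documented above are how an MFA-protected GetSessionToken call is made with the v2 client; a minimal sketch follows, with the MFA device ARN and six-digit code as placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// SerialNumber identifies the MFA device; TokenCode is the current
	// six-digit value. Both are placeholders here.
	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
		TokenCode:       aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("session credentials expire:", aws.ToTime(out.Credentials.Expiration))
}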
type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // Metadata pertaining to the operation's result. @@ -161,6 +179,9 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -173,6 +194,15 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { return err } @@ -191,6 +221,18 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addDisableHTTPSMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index 9db5bfd434..a90b2b7362 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -8,11 +8,13 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +92,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -157,7 +159,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out 
middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -169,6 +174,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -228,7 +236,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -238,12 +249,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -259,6 +278,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -268,6 +288,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -288,9 +311,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 5d634ce35c..59349890f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -16,12 +16,22 @@ import ( "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsquery_deserializeOpAssumeRole struct { } @@ -37,6 +47,10 @@ func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -154,6 +168,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -277,6 +295,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -388,6 +410,121 @@ func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhtt } } +type awsAwsquery_deserializeOpAssumeRoot struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoot) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoot(response, &metadata) + } + output := &AssumeRootOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRootResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRootOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { } @@ -403,6 +540,10 @@ func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize( return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -511,6 +652,10 @@ func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -616,6 +761,10 @@ func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -721,6 +870,10 @@ func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -835,6 +988,10 @@ func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2226,6 +2383,61 @@ func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **Assume return nil } +func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRootOutput + if *v == nil { + sv = &AssumeRootOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index d963fd8d19..cbb19c7f66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -3,9 +3,11 @@ // Package sts provides the API client, operations, and parameter types for AWS // Security Token Service. // -// Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for users. This guide provides -// descriptions of the STS API. For more information about using this service, see -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// . +// # Security Token Service +// +// Security Token Service (STS) enables you to request temporary, +// limited-privilege credentials for users. 
This guide provides descriptions of the +// STS API. For more information about using this service, see [Temporary Security Credentials]. +// +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index 32e2d5435f..dca2ce3599 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -17,6 +17,7 @@ import ( smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -306,6 +307,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1045,7 +1057,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -1071,6 +1083,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -1084,12 +1099,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -1111,5 +1131,6 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index 6b6e839e6c..86bb3b79be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -13,6 +13,7 @@ "api_op_AssumeRole.go", "api_op_AssumeRoleWithSAML.go", 
"api_op_AssumeRoleWithWebIdentity.go", + "api_op_AssumeRoot.go", "api_op_DecodeAuthorizationMessage.go", "api_op_GetAccessKeyInfo.go", "api_op_GetCallerIdentity.go", @@ -31,11 +32,12 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/sts", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 6e0f31d271..a984a2a6d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.6" +const goModuleVersion = "1.33.19" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index 3dbd993b54..8ee3eed858 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -87,6 +87,7 @@ func New() *Resolver { var partitionRegexp = struct { Aws *regexp.Regexp AwsCn *regexp.Regexp + AwsEusc *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp AwsIsoE *regexp.Regexp @@ -94,8 +95,9 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), @@ -172,6 +174,12 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-4", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ @@ -219,6 +227,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{}, @@ -339,6 +350,27 @@ var defaultPartitions = endpoints.Partitions{ }: endpoints.Endpoint{}, }, }, + { + ID: "aws-eusc", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.eu", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsEusc, + IsRegionalized: true, + }, { ID: "aws-iso", Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ @@ -414,6 +446,11 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsIsoE, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "eu-isoe-west-1", + }: endpoints.Endpoint{}, 
+ }, }, { ID: "aws-iso-f", @@ -435,6 +472,14 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsIsoF, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-us-gov", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index 5c1be79f8c..e1398f3bb8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -9,7 +9,9 @@ import ( internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,8 +52,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -64,23 +68,29 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. (Required) Region string // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. 
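(Illustrative aside, not part of the vendored patch.) The Options hunk below adds MeterProvider and TracerProvider alongside the existing retry settings; as a hedged sketch, a caller might override retry behavior per client like this, leaving the new metrics and tracing providers at their no-op defaults:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Per-client overrides via functional options; RetryMode is only
	// consulted when no custom Retryer is supplied. MeterProvider and
	// TracerProvider (smithy-go metrics/tracing) are left unset here.
	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	})

	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("caller:", aws.ToString(out.Arn))
}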
RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -95,10 +105,14 @@ type Options struct { // within your applications. RuntimeEnvironment aws.RuntimeEnvironment + // The client tracer provider. + TracerProvider tracing.TracerProvider + // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +157,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go index 4c08061c0c..96b222136b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -11,6 +11,7 @@ import ( smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/encoding/httpbinding" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "path" ) @@ -25,6 +26,10 @@ func (*awsAwsquery_serializeOpAssumeRole) ID() string { func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -76,6 +81,8 @@ func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -89,6 +96,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -140,6 +151,8 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -153,6 +166,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -204,6 +221,78 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoot struct { +} + +func (*awsAwsquery_serializeOpAssumeRoot) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRootInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoot") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -217,6 +306,10 @@ func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) 
HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -268,6 +361,8 @@ func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -281,6 +376,10 @@ func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -332,6 +431,8 @@ func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -345,6 +446,10 @@ func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -392,6 +497,8 @@ func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -405,6 +512,10 @@ func (*awsAwsquery_serializeOpGetFederationToken) ID() string { func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -456,6 +567,8 @@ func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } @@ -469,6 +582,10 @@ func (*awsAwsquery_serializeOpGetSessionToken) ID() string { func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -520,6 +637,8 @@ func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { @@ -772,6 +891,30 @@ func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRole return nil } +func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.TargetPrincipal != nil { + objectKey := object.Key("TargetPrincipal") + objectKey.String(*v.TargetPrincipal) + } + + if v.TaskPolicyArn != nil { + objectKey := object.Key("TaskPolicyArn") + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil { + return err + } + } + + return nil +} + func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { object := value.Object() _ = object diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 097875b279..041629bba2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -65,9 +65,10 @@ func (e *IDPCommunicationErrorException) ErrorCode() string { func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. If this error is returned for the -// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired -// or has been explicitly revoked. +// because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it can +// also mean that the claim has expired or has been explicitly revoked. type IDPRejectedClaimException struct { Message *string @@ -94,8 +95,8 @@ func (e *IDPRejectedClaimException) ErrorCode() string { func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// invalid. This can happen if the token contains invalid characters, such as line +// breaks, or if the message has expired. 
type InvalidAuthorizationMessageException struct { Message *string @@ -183,11 +184,13 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You could receive this error even though you meet other -// defined session policy and session tag limits. For more information, see IAM -// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. +// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. +// +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length type PackedPolicyTooLargeException struct { Message *string @@ -215,9 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM +// User Guide. +// +// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index e3701d11d1..dff7a3c2e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -11,10 +11,11 @@ import ( // returns. type AssumedRoleUser struct { - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // The ARN of the temporary security credentials that are returned from the AssumeRole + // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in + // the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. 
Arn *string @@ -61,8 +62,9 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // see [IAM Identifiers]in the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -81,9 +83,10 @@ type FederatedUser struct { type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. + // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web + // Services General Reference. + // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html Arn *string noSmithyDocumentSerde @@ -107,23 +110,30 @@ type ProvidedContext struct { // You can pass custom key-value pair attributes when you assume a role or // federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. +// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User +// Guide. +// +// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html type Tag struct { - // The key for a session tag. You can pass up to 50 session tags. The plain text - // session tag keys can’t exceed 128 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Key *string - // The value for a session tag. You can pass up to 50 session tags. The plain text - // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. 
+ // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Value *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go index 3e4bad2a92..1026e22118 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -70,6 +70,26 @@ func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Conte return next.HandleInitialize(ctx, in) } +type validateOpAssumeRoot struct { +} + +func (*validateOpAssumeRoot) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRootInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRootInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDecodeAuthorizationMessage struct { } @@ -142,6 +162,10 @@ func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) } +func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After) +} + func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) } @@ -254,6 +278,24 @@ func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) } } +func validateOpAssumeRootInput(v *AssumeRootInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"} + if v.TargetPrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal")) + } + if v.TaskPolicyArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { if v == nil { return nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 567514410a..3790c93fd7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -456,8 +456,8 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.36.0 -## explicit; go 1.21 +# github.com/aws/aws-sdk-go-v2 v1.36.3 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn github.com/aws/aws-sdk-go-v2/aws/defaults @@ -484,15 +484,15 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.27.11 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/config v1.29.14 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.11 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.67 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds github.com/aws/aws-sdk-go-v2/credentials/endpointcreds @@ -500,21 +500,21 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 -## explicit; go 1.21 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 -## explicit; go 1.21 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 @@ -533,17 +533,17 @@ github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types github.com/aws/aws-sdk-go-v2/service/iam github.com/aws/aws-sdk-go-v2/service/iam/internal/endpoints github.com/aws/aws-sdk-go-v2/service/iam/types -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config @@ -558,25 +558,25 @@ github.com/aws/aws-sdk-go-v2/service/route53 github.com/aws/aws-sdk-go-v2/service/route53/internal/customizations github.com/aws/aws-sdk-go-v2/service/route53/internal/endpoints 
github.com/aws/aws-sdk-go-v2/service/route53/types -# github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types From 66ed61c916b7ae2ac91e0b49cc6c2e36ec6439ea Mon Sep 17 00:00:00 2001 From: barbacbd Date: Fri, 30 May 2025 16:05:16 -0400 Subject: [PATCH 2/2] pkg/infrastructure/aws/clusterapi: ** Updating infrastructure module to use the sdk v2. --- pkg/infrastructure/aws/clusterapi/ami.go | 34 ++- pkg/infrastructure/aws/clusterapi/aws.go | 220 ++++++++++-------- pkg/infrastructure/aws/clusterapi/iam.go | 101 +++++--- pkg/infrastructure/aws/clusterapi/ignition.go | 35 ++- 4 files changed, 240 insertions(+), 150 deletions(-) diff --git a/pkg/infrastructure/aws/clusterapi/ami.go b/pkg/infrastructure/aws/clusterapi/ami.go index f2cacdc591..cdfd08ed3f 100644 --- a/pkg/infrastructure/aws/clusterapi/ami.go +++ b/pkg/infrastructure/aws/clusterapi/ami.go @@ -5,8 +5,10 @@ import ( "fmt" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + configv2 "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/sirupsen/logrus" "github.com/openshift/installer/pkg/asset/installconfig" @@ -20,13 +22,21 @@ func copyAMIToRegion(ctx context.Context, installConfig *installconfig.InstallCo logrus.Infof("Copying AMI %s to region %s", amiID, installConfig.AWS.Region) - session, err := installConfig.AWS.Session(ctx) + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(installConfig.Config.AWS.Region)) if err != nil { - return "", fmt.Errorf("failed to get AWS session: %w", err) + return "", fmt.Errorf("failed to load AWS config: %w", err) } - client := ec2.New(session) - res, err := client.CopyImageWithContext(ctx, &ec2.CopyImageInput{ + client := ec2.NewFromConfig(cfg, func(options *ec2.Options) { + options.Region = installConfig.Config.AWS.Region + for _, endpoint := range installConfig.Config.AWS.ServiceEndpoints { + if strings.EqualFold(endpoint.Name, "ec2") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) + + res, err := client.CopyImage(ctx, &ec2.CopyImageInput{ Name: aws.String(fmt.Sprintf("%s-master", infraID)), ClientToken: aws.String(infraID), Description: aws.String("Created by 
Openshift Installer"), @@ -39,9 +49,9 @@ func copyAMIToRegion(ctx context.Context, installConfig *installconfig.InstallCo } name := fmt.Sprintf("%s-ami-%s", infraID, installConfig.AWS.Region) - amiTags := make([]*ec2.Tag, 0, len(installConfig.Config.AWS.UserTags)+4) + amiTags := make([]ec2types.Tag, 0, len(installConfig.Config.AWS.UserTags)+4) for k, v := range installConfig.Config.AWS.UserTags { - amiTags = append(amiTags, &ec2.Tag{ + amiTags = append(amiTags, ec2types.Tag{ Key: aws.String(k), Value: aws.String(v), }) @@ -52,19 +62,19 @@ func copyAMIToRegion(ctx context.Context, installConfig *installconfig.InstallCo "sourceRegion": amiRegion, fmt.Sprintf("kubernetes.io/cluster/%s", infraID): "owned", } { - amiTags = append(amiTags, &ec2.Tag{ + amiTags = append(amiTags, ec2types.Tag{ Key: aws.String(k), Value: aws.String(v), }) } - _, err = client.CreateTagsWithContext(ctx, &ec2.CreateTagsInput{ - Resources: []*string{res.ImageId}, + _, err = client.CreateTags(ctx, &ec2.CreateTagsInput{ + Resources: []string{aws.ToString(res.ImageId)}, Tags: amiTags, }) if err != nil { return "", fmt.Errorf("failed to tag AMI copy (%s): %w", name, err) } - return aws.StringValue(res.ImageId), nil + return aws.ToString(res.ImageId), nil } diff --git a/pkg/infrastructure/aws/clusterapi/aws.go b/pkg/infrastructure/aws/clusterapi/aws.go index 0a906d1a4e..bfd073b940 100644 --- a/pkg/infrastructure/aws/clusterapi/aws.go +++ b/pkg/infrastructure/aws/clusterapi/aws.go @@ -7,13 +7,14 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go-v2/aws" + configv2 "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + elbv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elbv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/aws/aws-sdk-go-v2/service/s3" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,6 +23,7 @@ import ( capa "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/openshift/installer/pkg/asset/installconfig" awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" awsmanifest "github.com/openshift/installer/pkg/asset/manifests/aws" "github.com/openshift/installer/pkg/asset/manifests/capiutils" @@ -128,7 +130,7 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) vpcID := awsCluster.Spec.NetworkSpec.VPC.ID if len(subnetIDs) > 0 && len(vpcID) == 0 { // All subnets belong to the same VPC, so we only need one - vpcID, err = getVPCFromSubnets(ctx, awsSession, awsCluster.Spec.Region, subnetIDs[:1]) + vpcID, err = getVPCFromSubnets(ctx, in.InstallConfig, subnetIDs[:1]) if err != nil { return err } @@ -160,7 +162,7 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) if err != nil { return fmt.Errorf("failed to create private hosted zone: %w", err) } - phzID = aws.StringValue(res.Id) + phzID = *res.Id logrus.Infoln("Created private Hosted Zone") } @@ -175,7 +177,7 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) } pubLB := 
awsCluster.Status.Network.SecondaryAPIServerELB - aliasZoneID, err := getHostedZoneIDForNLB(ctx, awsSession, awsCluster.Spec.Region, pubLB.Name) + aliasZoneID, err := getHostedZoneIDForNLB(ctx, in.InstallConfig, pubLB.Name) if err != nil { return fmt.Errorf("failed to find HostedZone ID for NLB: %w", err) } @@ -184,7 +186,7 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) Name: apiName, Region: awsCluster.Spec.Region, DNSTarget: pubLB.DNSName, - ZoneID: aws.StringValue(zone.Id), + ZoneID: *zone.Id, AliasZoneID: aliasZoneID, HostedZoneRole: "", // we dont want to assume role here }); err != nil { @@ -193,7 +195,7 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) logrus.Debugln("Created public API record in public zone") } - aliasZoneID, err := getHostedZoneIDForNLB(ctx, awsSession, awsCluster.Spec.Region, awsCluster.Status.Network.APIServerELB.Name) + aliasZoneID, err := getHostedZoneIDForNLB(ctx, in.InstallConfig, awsCluster.Status.Network.APIServerELB.Name) if err != nil { return fmt.Errorf("failed to find HostedZone ID for NLB: %w", err) } @@ -227,61 +229,84 @@ func (*Provider) InfraReady(ctx context.Context, in clusterapi.InfraReadyInput) return nil } -func getVPCFromSubnets(ctx context.Context, awsSession *session.Session, region string, subnetIDs []string) (string, error) { +func getVPCFromSubnets(ctx context.Context, ic *installconfig.InstallConfig, subnetIDs []string) (string, error) { var vpcID string - var lastError error - client := ec2.New(awsSession, aws.NewConfig().WithRegion(region)) - err := client.DescribeSubnetsPagesWithContext( - ctx, - &ec2.DescribeSubnetsInput{SubnetIds: aws.StringSlice(subnetIDs)}, - func(results *ec2.DescribeSubnetsOutput, lastPage bool) bool { - for _, subnet := range results.Subnets { - if subnet.SubnetId == nil { - continue - } - if subnet.SubnetArn == nil { - lastError = fmt.Errorf("%s has no ARN", *subnet.SubnetId) - return false - } - if subnet.VpcId == nil { - lastError = fmt.Errorf("%s has no VPC", *subnet.SubnetId) - return false - } - if subnet.AvailabilityZone == nil { - lastError = fmt.Errorf("%s has no availability zone", *subnet.SubnetId) - return false - } - // All subnets belong to the same VPC - vpcID = aws.StringValue(subnet.VpcId) - lastError = nil - return true - } - return !lastPage - }, - ) - if err == nil { - err = lastError - } + + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(ic.Config.AWS.Region)) if err != nil { - return "", fmt.Errorf("failed to get VPC from subnets: %w", err) + return "", fmt.Errorf("failed to load AWS config: %w", err) + } + + client := ec2.NewFromConfig(cfg, func(options *ec2.Options) { + options.Region = ic.Config.AWS.Region + for _, endpoint := range ic.Config.AWS.ServiceEndpoints { + if strings.EqualFold(endpoint.Name, "ec2") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) + + paginator := ec2.NewDescribeSubnetsPaginator(client, &ec2.DescribeSubnetsInput{SubnetIds: subnetIDs}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return "", fmt.Errorf("failed to get VPC from subnets: %w", err) + } + for _, subnet := range page.Subnets { + if subnet.SubnetId == nil { + continue + } + if subnet.SubnetArn == nil { + return "", fmt.Errorf("%s has no ARN", *subnet.SubnetId) + } + if subnet.VpcId == nil { + return "", fmt.Errorf("%s has no VPC", *subnet.SubnetId) + } + if subnet.AvailabilityZone == nil { + return "", fmt.Errorf("%s has no availability zone", 
*subnet.SubnetId) + } + vpcID = *subnet.VpcId + // All subnets belong to the same VPC + break + } + } + + if vpcID == "" { + return "", fmt.Errorf("no VPC found for subnets %v", subnetIDs) } return vpcID, nil } // getHostedZoneIDForNLB returns the HostedZone ID for a region from a known table or queries it from the LB instead. -func getHostedZoneIDForNLB(ctx context.Context, awsSession *session.Session, region string, lbName string) (string, error) { - if hzID, ok := awsconfig.HostedZoneIDPerRegionNLBMap[region]; ok { +func getHostedZoneIDForNLB(ctx context.Context, ic *installconfig.InstallConfig, lbName string) (string, error) { + if hzID, ok := awsconfig.HostedZoneIDPerRegionNLBMap[ic.Config.AWS.Region]; ok { return hzID, nil } + + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(ic.Config.Platform.AWS.Region)) + if err != nil { + return "", fmt.Errorf("failed to load AWS config: %w", err) + } + + client := elbv2.NewFromConfig(cfg, func(options *elbv2.Options) { + options.Region = ic.Config.Platform.AWS.Region + for _, endpoint := range ic.Config.AWS.ServiceEndpoints { + if strings.EqualFold(endpoint.Name, "elasticloadbalancing") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) + // If the HostedZoneID is not known, query from the LoadBalancer input := elbv2.DescribeLoadBalancersInput{ - Names: aws.StringSlice([]string{lbName}), + Names: []string{lbName}, } - res, err := elbv2.New(awsSession).DescribeLoadBalancersWithContext(ctx, &input) + + res, err := client.DescribeLoadBalancers(ctx, &input) if err != nil { - var awsErr awserr.Error - if errors.As(err, &awsErr) && awsErr.Code() == elbv2.ErrCodeLoadBalancerNotFoundException { + var lbError *elbv2types.LoadBalancerNotFoundException + if errors.As(err, &lbError) { return "", errNotFound } return "", fmt.Errorf("failed to list load balancers: %w", err) @@ -426,17 +451,8 @@ func isSSHRuleGone(ctx context.Context, session *session.Session, region, sgID s // PostDestroy deletes the ignition bucket after capi stopped running, so it won't try to reconcile the bucket. func (p *Provider) PostDestroy(ctx context.Context, in clusterapi.PostDestroyerInput) error { - region := in.Metadata.AWS.Region - session, err := awsconfig.GetSessionWithOptions( - awsconfig.WithRegion(region), - awsconfig.WithServiceEndpoints(region, in.Metadata.AWS.ServiceEndpoints), - ) - if err != nil { - return fmt.Errorf("failed to create aws session: %w", err) - } - bucketName := awsmanifest.GetIgnitionBucketName(in.Metadata.InfraID) - if err := removeS3Bucket(ctx, session, bucketName); err != nil { + if err := removeS3Bucket(ctx, in.Metadata.AWS.Region, bucketName, in.Metadata.AWS.ServiceEndpoints); err != nil { if p.bestEffortDeleteIgnition { logrus.Warnf("failed to delete ignition bucket %s: %v", bucketName, err) return nil @@ -448,46 +464,56 @@ func (p *Provider) PostDestroy(ctx context.Context, in clusterapi.PostDestroyerI } // removeS3Bucket deletes an s3 bucket given its name. 
-func removeS3Bucket(ctx context.Context, session *session.Session, bucketName string) error { - client := s3.New(session) - - iter := s3manager.NewDeleteListIterator(client, &s3.ListObjectsInput{ - Bucket: aws.String(bucketName), - }) - err := s3manager.NewBatchDeleteWithClient(client).Delete(ctx, iter) - if err != nil && !isBucketNotFound(err) { - return err +func removeS3Bucket(ctx context.Context, region string, bucketName string, endpoints []awstypes.ServiceEndpoint) error { + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(region)) + if err != nil { + return fmt.Errorf("failed to load AWS config: %w", err) } - logrus.Debugf("bucket %q emptied", bucketName) - if _, err := client.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}); err != nil { - if isBucketNotFound(err) { + client := s3.NewFromConfig(cfg, func(options *s3.Options) { + options.Region = region + for _, endpoint := range endpoints { + if strings.EqualFold(endpoint.Name, "s3") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) + + paginator := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{Bucket: aws.String(bucketName)}) + for paginator.HasMorePages() { + page, err := paginator.NextPage(ctx) + if err != nil { + return fmt.Errorf("failed to list objects in bucket %s: %w", bucketName, err) + } + + var objects []s3types.ObjectIdentifier + for _, object := range page.Contents { + objects = append(objects, s3types.ObjectIdentifier{ + Key: object.Key, + }) + } + + if len(objects) > 0 { + if _, err = client.DeleteObjects(ctx, &s3.DeleteObjectsInput{ + Bucket: aws.String(bucketName), + Delete: &s3types.Delete{ + Objects: objects, + }, + }); err != nil { + return fmt.Errorf("failed to delete objects in bucket %s: %w", bucketName, err) + } + } + } + + if _, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{Bucket: aws.String(bucketName)}); err != nil { + var noSuchBucket *s3types.NoSuchBucket + if errors.As(err, &noSuchBucket) { logrus.Debugf("bucket %q already deleted", bucketName) return nil } - return err + return fmt.Errorf("failed to delete bucket %s: %w", bucketName, err) } + + logrus.Debugf("bucket %q emptied", bucketName) return nil } - -func isBucketNotFound(err interface{}) bool { - switch s3Err := err.(type) { - case awserr.Error: - if s3Err.Code() == s3.ErrCodeNoSuchBucket { - return true - } - origErr := s3Err.OrigErr() - if origErr != nil { - return isBucketNotFound(origErr) - } - case s3manager.Error: - if s3Err.OrigErr != nil { - return isBucketNotFound(s3Err.OrigErr) - } - case s3manager.Errors: - if len(s3Err) == 1 { - return isBucketNotFound(s3Err[0]) - } - } - return false -} diff --git a/pkg/infrastructure/aws/clusterapi/iam.go b/pkg/infrastructure/aws/clusterapi/iam.go index eef48bc893..6ee437fd99 100644 --- a/pkg/infrastructure/aws/clusterapi/iam.go +++ b/pkg/infrastructure/aws/clusterapi/iam.go @@ -5,11 +5,15 @@ import ( "encoding/json" "errors" "fmt" + "net/url" + "strings" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go-v2/aws" + configv2 "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/iam" + iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/sirupsen/logrus" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" @@ -95,14 +99,22 @@ func createIAMRoles(ctx context.Context,
infraID string, ic *installconfig.Insta logrus.Infoln("Reconciling IAM roles for control-plane and compute nodes") // Create the IAM Role with the aws sdk. // https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#IAM.CreateRole - session, err := ic.AWS.Session(ctx) + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(ic.Config.Platform.AWS.Region)) if err != nil { - return fmt.Errorf("failed to load AWS session: %w", err) + return fmt.Errorf("failed to load AWS config: %w", err) } - svc := iam.New(session) + + client := iam.NewFromConfig(cfg, func(options *iam.Options) { + options.Region = ic.Config.Platform.AWS.Region + for _, endpoint := range ic.Config.AWS.ServiceEndpoints { + if strings.EqualFold(endpoint.Name, "iam") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) // Create the IAM Roles for master and workers. - tags := []*iam.Tag{ + tags := []iamtypes.Tag{ { Key: aws.String(fmt.Sprintf("kubernetes.io/cluster/%s", infraID)), Value: aws.String("owned"), @@ -110,7 +122,7 @@ func createIAMRoles(ctx context.Context, infraID string, ic *installconfig.Insta } for k, v := range ic.Config.AWS.UserTags { - tags = append(tags, &iam.Tag{ + tags = append(tags, iamtypes.Tag{ Key: aws.String(k), Value: aws.String(v), }) @@ -123,7 +135,7 @@ func createIAMRoles(ctx context.Context, infraID string, ic *installconfig.Insta Effect: "Allow", Principal: iamv1.Principals{ iamv1.PrincipalService: []string{ - getPartitionService(ic.AWS.Region), + getPartitionDNSSuffix(ic.AWS.Region), }, }, Action: iamv1.Actions{ @@ -162,30 +174,36 @@ func createIAMRoles(ctx context.Context, infraID string, ic *installconfig.Insta continue } - roleName, err := getOrCreateIAMRole(ctx, role, infraID, string(assumePolicyBytes), *ic, tags, svc) + roleName, err := getOrCreateIAMRole(ctx, role, infraID, string(assumePolicyBytes), *ic, tags, client) if err != nil { return fmt.Errorf("failed to create IAM %s role: %w", role, err) } profileName := aws.String(fmt.Sprintf("%s-%s-profile", infraID, role)) - if _, err := svc.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: profileName}); err != nil { - var awsErr awserr.Error - if errors.As(err, &awsErr) && awsErr.Code() != iam.ErrCodeNoSuchEntityException { + if _, err := client.GetInstanceProfile(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: profileName}); err != nil { + var noSuchEntity *iamtypes.NoSuchEntityException + if !errors.As(err, &noSuchEntity) { return fmt.Errorf("failed to get %s instance profile: %w", role, err) } // If the profile does not exist, create it. - if _, err := svc.CreateInstanceProfileWithContext(ctx, &iam.CreateInstanceProfileInput{ + if _, err := client.CreateInstanceProfile(ctx, &iam.CreateInstanceProfileInput{ InstanceProfileName: profileName, Tags: tags, }); err != nil { return fmt.Errorf("failed to create %s instance profile: %w", role, err) } - if err := svc.WaitUntilInstanceProfileExistsWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: profileName}); err != nil { + + waiter := iam.NewInstanceProfileExistsWaiter(client) + if err := waiter.Wait(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: profileName}, 2*time.Minute, + func(o *iam.InstanceProfileExistsWaiterOptions) { + o.MaxDelay = 5 * time.Second + o.MinDelay = 1 * time.Second + }); err != nil { return fmt.Errorf("failed to wait for %s instance profile to exist: %w", role, err) } // Finally, attach the role to the profile. 
- if _, err := svc.AddRoleToInstanceProfileWithContext(ctx, &iam.AddRoleToInstanceProfileInput{ + if _, err := client.AddRoleToInstanceProfile(ctx, &iam.AddRoleToInstanceProfileInput{ InstanceProfileName: profileName, RoleName: aws.String(roleName), }); err != nil { @@ -199,7 +217,7 @@ func createIAMRoles(ctx context.Context, infraID string, ic *installconfig.Insta // getOrCreateRole returns the name of the IAM role to be used, // creating it when not specified by the user in the install config. -func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy string, ic installconfig.InstallConfig, tags []*iam.Tag, svc *iam.IAM) (string, error) { +func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy string, ic installconfig.InstallConfig, tags []iamtypes.Tag, svc *iam.Client) (string, error) { roleName := aws.String(fmt.Sprintf("%s-%s-role", infraID, nodeRole)) var defaultRole string @@ -224,11 +242,12 @@ func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy str return workerRole, nil } - if _, err := svc.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: roleName}); err != nil { - var awsErr awserr.Error - if errors.As(err, &awsErr) && awsErr.Code() != iam.ErrCodeNoSuchEntityException { + if _, err := svc.GetRole(ctx, &iam.GetRoleInput{RoleName: roleName}); err != nil { + var noSuchRoleError *iamtypes.NoSuchEntityException + if !errors.As(err, &noSuchRoleError) { return "", fmt.Errorf("failed to get %s role: %w", nodeRole, err) } + // If the role does not exist, create it. logrus.Infof("Creating IAM role for %s", nodeRole) createRoleInput := &iam.CreateRoleInput{ @@ -236,11 +255,15 @@ func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy str AssumeRolePolicyDocument: aws.String(assumePolicy), Tags: tags, } - if _, err := svc.CreateRoleWithContext(ctx, createRoleInput); err != nil { + if _, err := svc.CreateRole(ctx, createRoleInput); err != nil { return "", fmt.Errorf("failed to create %s role: %w", nodeRole, err) } - - if err := svc.WaitUntilRoleExistsWithContext(ctx, &iam.GetRoleInput{RoleName: roleName}); err != nil { + waiter := iam.NewRoleExistsWaiter(svc) + if err := waiter.Wait(ctx, &iam.GetRoleInput{RoleName: roleName}, 2*time.Minute, + func(o *iam.RoleExistsWaiterOptions) { + o.MaxDelay = 5 * time.Second + o.MinDelay = 1 * time.Second + }); err != nil { return "", fmt.Errorf("failed to wait for %s role to exist: %w", nodeRole, err) } } @@ -251,7 +274,7 @@ func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy str if err != nil { return "", fmt.Errorf("failed to marshal %s policy: %w", nodeRole, err) } - if _, err := svc.PutRolePolicyWithContext(ctx, &iam.PutRolePolicyInput{ + if _, err := svc.PutRolePolicy(ctx, &iam.PutRolePolicyInput{ PolicyDocument: aws.String(string(b)), PolicyName: policyName, RoleName: roleName, @@ -262,10 +285,28 @@ func getOrCreateIAMRole(ctx context.Context, nodeRole, infraID, assumePolicy str return *roleName, nil } -func getPartitionService(region string) string { - partitionDNSSuffix := "amazonaws.com" - if ps, found := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), region); found { - partitionDNSSuffix = ps.DNSSuffix() +func getPartitionDNSSuffix(region string) string { + endpoint, err := ec2.NewDefaultEndpointResolver().ResolveEndpoint(region, ec2.EndpointResolverOptions{}) + if err != nil { + logrus.Errorf("failed to resolve AWS ec2 endpoint: %v", err) + return "" } - return fmt.Sprintf("ec2.%s", partitionDNSSuffix) + + u, err := 
url.Parse(endpoint.URL) + if err != nil { + logrus.Errorf("failed to parse partition ID URL: %v", err) + return "" + } + + domain := "amazonaws.com" + // Extract the hostname + host := u.Hostname() + // Split the hostname by "." to get the domain parts + parts := strings.Split(host, ".") + if len(parts) > 2 { + domain = strings.Join(parts[2:], ".") + } + + logrus.Debugf("Using domain name: %s", domain) + return fmt.Sprintf("ec2.%s", domain) } diff --git a/pkg/infrastructure/aws/clusterapi/ignition.go b/pkg/infrastructure/aws/clusterapi/ignition.go index 0748419440..06c5f3471e 100644 --- a/pkg/infrastructure/aws/clusterapi/ignition.go +++ b/pkg/infrastructure/aws/clusterapi/ignition.go @@ -3,9 +3,12 @@ package clusterapi import ( "context" "fmt" + "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + configv2 "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/sirupsen/logrus" capa "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -33,26 +36,36 @@ func editIgnition(ctx context.Context, in clusterapi.IgnitionInput) (*clusterapi return nil, fmt.Errorf("failed to get AWSCluster: %w", err) } - awsSession, err := in.InstallConfig.AWS.Session(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get aws session: %w", err) - } - // There is no direct access to load balancer IP addresses, so the security groups // are used here to find the network interfaces that correspond to the load balancers. - securityGroupIDs := make([]*string, 0, len(awsCluster.Status.Network.SecurityGroups)) + securityGroupIDs := make([]string, 0, len(awsCluster.Status.Network.SecurityGroups)) for _, securityGroup := range awsCluster.Status.Network.SecurityGroups { - securityGroupIDs = append(securityGroupIDs, aws.String(securityGroup.ID)) + securityGroupIDs = append(securityGroupIDs, securityGroup.ID) } nicInput := ec2.DescribeNetworkInterfacesInput{ - Filters: []*ec2.Filter{ + Filters: []ec2types.Filter{ { Name: aws.String("group-id"), Values: securityGroupIDs, }, }, } - nicOutput, err := ec2.New(awsSession).DescribeNetworkInterfacesWithContext(ctx, &nicInput) + + cfg, err := configv2.LoadDefaultConfig(ctx, configv2.WithRegion(in.InstallConfig.Config.AWS.Region)) + if err != nil { + return nil, fmt.Errorf("failed to load AWS config: %w", err) + } + + client := ec2.NewFromConfig(cfg, func(options *ec2.Options) { + options.Region = in.InstallConfig.Config.AWS.Region + for _, endpoint := range in.InstallConfig.Config.AWS.ServiceEndpoints { + if strings.EqualFold(endpoint.Name, "ec2") { + options.BaseEndpoint = aws.String(endpoint.URL) + } + } + }) + + nicOutput, err := client.DescribeNetworkInterfaces(ctx, &nicInput) if err != nil { return nil, fmt.Errorf("failed to describe network interfaces: %w", err) }
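The installer hunks above repeat one migration pattern: load a shared aws.Config with config.LoadDefaultConfig, build each service client with NewFromConfig, and apply the install-config region plus any user-supplied service endpoint as a BaseEndpoint override inside the client's functional options. The following is a minimal, self-contained sketch of that pattern, not installer code: the serviceEndpoint type, the newEC2Client helper name, and the region value are illustrative assumptions.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
)

// serviceEndpoint loosely mirrors the shape of the installer's ServiceEndpoint
// entries (a service name plus an override URL); it is an assumption for this sketch.
type serviceEndpoint struct {
	Name string
	URL  string
}

// newEC2Client builds an EC2 client the way the migrated code does: default
// credential chain, explicit region, and an optional BaseEndpoint override
// when a matching custom endpoint is configured.
func newEC2Client(ctx context.Context, region string, endpoints []serviceEndpoint) (*ec2.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
	if err != nil {
		return nil, err
	}
	client := ec2.NewFromConfig(cfg, func(o *ec2.Options) {
		o.Region = region
		for _, ep := range endpoints {
			if strings.EqualFold(ep.Name, "ec2") {
				o.BaseEndpoint = aws.String(ep.URL)
			}
		}
	})
	return client, nil
}

func main() {
	// Example usage with no endpoint overrides; the region is a placeholder.
	client, err := newEC2Client(context.Background(), "us-east-1", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Paginators replace the v1 *Pages helpers, e.g. DescribeSubnetsPagesWithContext.
	_ = ec2.NewDescribeSubnetsPaginator(client, &ec2.DescribeSubnetsInput{})
}

The same construction, with the service package swapped (elasticloadbalancingv2, s3, iam), covers the other clients introduced in this patch; typed errors such as *iamtypes.NoSuchEntityException, paginators, and waiters take the place of the v1 awserr code checks and the *WithContext/*Pages/WaitUntil* helpers.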