chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3 (#3076)

Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.51.4 to 1.52.1.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.51.4...service/s3/v1.52.1)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2024-03-20 19:45:11 -07:00 committed by GitHub
parent ba433abdb6
commit e5c5d333ee
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
32 changed files with 224 additions and 129 deletions

go.mod

@ -8,7 +8,7 @@ require (
github.com/aws/aws-sdk-go-v2 v1.25.3
github.com/aws/aws-sdk-go-v2/config v1.27.7
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9
github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.52.1
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231213181459-b0fcec718dc6
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589
github.com/containerd/cgroups v1.1.0 // indirect

go.sum

@ -105,8 +105,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5 h1:K/NXvIftO
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.5/go.mod h1:cl9HGLV66EnCmMNzq4sYOti+/xo8w34CsgzVtm2GgsY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A=
github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU=
github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
github.com/aws/aws-sdk-go-v2/service/s3 v1.52.1 h1:Y/TTvxMdYwNvhzolvneV1wEEN/ncQUSd1AnzFGTMPqM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.52.1/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.2 h1:XOPfar83RIRPEzfihnp+U6udOveKZJvPQ76SKWrLRHc=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.2/go.mod h1:Vv9Xyk1KMHXrR3vNQe8W5LMFdTjSeWk0gBZBzvf3Qa0=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.2 h1:pi0Skl6mNl2w8qWZXcdOyg197Zsf4G97U7Sso9JXGZE=
@ -688,8 +688,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=


@ -1,3 +1,11 @@
# v1.52.1 (2024-03-15)
* **Documentation**: Documentation updates for Amazon S3.
# v1.52.0 (2024-03-13)
* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT).
# v1.51.4 (2024-03-07)
* **Bug Fix**: Remove dependency on go-cmp.


@ -75,7 +75,7 @@ type AbortMultipartUploadInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the


@ -38,8 +38,8 @@ import (
// the request as appropriate). If the condition persists, the SDKs throw an
// exception (or, for the SDKs that don't use exceptions, they return an error).
// Note that if CompleteMultipartUpload fails, applications should be prepared to
// retry the failed requests. For more information, see Amazon S3 Error Best
// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html)
// retry any failed requests (including 500 error responses). For more information,
// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html)
// . You can't use Content-Type: application/x-www-form-urlencoded for the
// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
// header, CompleteMultipartUpload can still return a 200 OK response. For more
@ -118,7 +118,7 @@ type CompleteMultipartUploadInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
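
The updated CompleteMultipartUpload text above stresses being prepared to retry failed requests, including 500 responses, and notes that the SDKs retry according to your configuration settings. A minimal, hedged sketch of widening that retry budget when building the client with this module version (the attempt count is an arbitrary illustration):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load default config with a larger retry budget; retryable failures such
	// as 500 responses from CompleteMultipartUpload are retried by the SDK's
	// standard retryer up to this many attempts.
	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRetryMaxAttempts(10),
		config.WithRetryMode(aws.RetryModeStandard),
	)
	if err != nil {
		log.Fatal(err)
	}

	client := s3.NewFromConfig(cfg)
	_ = client // call client.CompleteMultipartUpload(...) with your parts here
}
```

Calls made through this client, including CompleteMultipartUpload, pick up the configured retryer automatically.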


@ -31,9 +31,12 @@ import (
// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Both the Region that you want to copy the object
// from and the Region that you want to copy the object to must be enabled for your
// account. Amazon S3 transfer acceleration does not support cross-Region copies.
// If you request a cross-Region copy using a transfer acceleration endpoint, you
// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
// account. For more information about how to enable a Region for your account, see
// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone)
// in the Amazon Web Services Account Management Guide. Amazon S3 transfer
// acceleration does not support cross-Region copies. If you request a cross-Region
// copy using a transfer acceleration endpoint, you get a 400 Bad Request error.
// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
// . Authentication and authorization All CopyObject requests must be
// authenticated and signed by using IAM credentials (access key ID and secret
// access key for the IAM identities). All headers with the x-amz- prefix,
@ -51,7 +54,7 @@ import (
// - If the source object is in a general purpose bucket, you must have
// s3:GetObject permission to read the source object that is being copied.
// - If the destination bucket is a general purpose bucket, you must have
// s3:PubObject permission to write the object copy to the destination bucket.
// s3:PutObject permission to write the object copy to the destination bucket.
// - Directory bucket permissions - You must have permissions in a bucket policy
// or an IAM identity-based policy based on the source and destination bucket types
// in a CopyObject operation.
@ -84,24 +87,26 @@ import (
// - If the error occurs during the copy operation, the error response is
// embedded in the 200 OK response. For example, in a cross-region copy, you may
// encounter throttling and receive a 200 OK response. For more information, see
// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK
// status code means the copy was accepted, but it doesn't mean the copy is
// complete. Another example is when you disconnect from Amazon S3 before the copy
// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
// response. You must stay connected to Amazon S3 until the entire response is
// successfully received and processed. If you call this API operation directly,
// make sure to design your application to parse the content of the response and
// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this
// condition. The SDKs detect the embedded error and apply error handling per your
// configuration settings (including automatically retrying the request as
// appropriate). If the condition persists, the SDKs throw an exception (or, for
// the SDKs that don't use exceptions, they return an error).
// Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror)
// . The 200 OK status code means the copy was accepted, but it doesn't mean the
// copy is complete. Another example is when you disconnect from Amazon S3 before
// the copy is complete, Amazon S3 might cancel the copy and you may receive a
// 200 OK response. You must stay connected to Amazon S3 until the entire
// response is successfully received and processed. If you call this API operation
// directly, make sure to design your application to parse the content of the
// response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs
// handle this condition. The SDKs detect the embedded error and apply error
// handling per your configuration settings (including automatically retrying the
// request as appropriate). If the condition persists, the SDKs throw an exception
// (or, for the SDKs that don't use exceptions, they return an error).
//
// Charge The copy request charge is based on the storage class and Region that
// you specify for the destination object. The request can also result in a data
// retrieval charge for the source if the source storage class bills for data
// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/)
// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// retrieval. If the copy source is in a different region, the data transfer is
// billed to the copy source account. For pricing information, see Amazon S3
// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory
// buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to CopyObject :
// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
@ -128,7 +133,7 @@ type CopyObjectInput struct {
// the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
// requests are not supported. Directory bucket names must be unique in the chosen
// Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
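
The CopyObject notes above explain that errors can arrive embedded in a 200 OK response and that the SDKs detect and retry that condition, so a direct call plus an ordinary error check is usually sufficient. A hedged sketch against this client version; the bucket and key names are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// CopySource is "<source-bucket>/<source-key>" (URL-encoded if needed).
	_, err = client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"), // placeholder names
		Key:        aws.String("backups/report.csv"),
		CopySource: aws.String("source-bucket/reports/report.csv"),
	})
	if err != nil {
		// An error embedded in a 200 response surfaces here as a normal error
		// after the SDK's built-in handling and retries.
		log.Fatal(err)
	}
}
```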


@ -55,11 +55,18 @@ import (
// required.
// - S3 Object Ownership - If your CreateBucket request includes the
// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
// permission is required. If your CreateBucket request sets BucketOwnerEnforced
// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access
// to an external Amazon Web Services account, your request fails with a 400
// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more
// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
// permission is required. To set an ACL on a bucket as part of a CreateBucket
// request, you must explicitly set S3 Object Ownership for the bucket to a
// different value than the default, BucketOwnerEnforced . Additionally, if your
// desired bucket ACL grants public access, you must first create the bucket
// (without the bucket ACL) and then explicitly disable Block Public Access on the
// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket
// with a public ACL, the request will fail. For the majority of modern use cases
// in S3, we recommend that you keep all Block Public Access settings enabled and
// keep ACLs disabled. If you would like to share data with users outside of your
// account, you can use bucket policies as needed. For more information, see
// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
// in the Amazon S3 User Guide.
// - S3 Block Public Access - If your specific use case requires granting public
// access to your S3 resources, you can disable Block Public Access. Specifically,
@ -115,7 +122,7 @@ type CreateBucketInput struct {
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide
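
The CreateBucket guidance above recommends keeping Block Public Access on and ACLs disabled (the BucketOwnerEnforced default) and sharing through bucket policies instead. A small sketch that makes the ownership setting explicit at creation time; the bucket name and Region are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder name
		// Keep ACLs disabled; grant access with bucket policies instead.
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintUsWest2,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```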


@ -169,7 +169,7 @@ type CreateMultipartUploadInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the


@ -64,7 +64,7 @@ type DeleteBucketInput struct {
// format https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide


@ -73,7 +73,7 @@ type DeleteBucketPolicyInput struct {
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide


@ -16,13 +16,25 @@ import (
// Removes an object from a bucket. The behavior depends on the bucket's
// versioning state:
//
// - If versioning is enabled, the operation removes the null version (if there
// is one) of an object and inserts a delete marker, which becomes the latest
// version of the object. If there isn't a null version, Amazon S3 does not remove
// any objects but will still respond that the command was successful.
// - If bucket versioning is not enabled, the operation permanently deletes the
// object.
//
// - If versioning is suspended or not enabled, the operation permanently
// deletes the object.
// - If bucket versioning is enabled, the operation inserts a delete marker,
// which becomes the current version of the object. To permanently delete an object
// in a versioned bucket, you must include the object's versionId in the request.
// For more information about versioning-enabled buckets, see Deleting object
// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html)
// .
//
// - If bucket versioning is suspended, the operation removes the object that
// has a null versionId , if there is one, and inserts a delete marker that
// becomes the current version of the object. If there isn't an object with a null
// versionId , and all versions of the object have a versionId , Amazon S3 does
// not remove the object and only inserts a delete marker. To permanently delete an
// object that has a versionId , you must include the object's versionId in the
// request. For more information about versioning-suspended buckets, see
// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html)
// .
//
// - Directory buckets - S3 Versioning isn't enabled and supported for directory
// buckets. For this API operation, only the null value of the version ID is
@ -59,7 +71,8 @@ import (
// - s3:DeleteObject - To delete an object from a bucket, you must always have
// the s3:DeleteObject permission.
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
// versiong-enabled bucket, you must have the s3:DeleteObjectVersion permission.
// versioning-enabled bucket, you must have the s3:DeleteObjectVersion
// permission.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
@ -100,7 +113,7 @@ type DeleteObjectInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
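
The rewritten DeleteObject doc above separates the simple delete, which only adds a delete marker in a versioning-enabled bucket, from a permanent delete, which must name the version. A brief sketch of both calls; bucket, key, and version ID values are illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// In a versioning-enabled bucket this only inserts a delete marker.
	if _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
		Key:    aws.String("logs/2024-03-20.json"),
	}); err != nil {
		log.Fatal(err)
	}

	// Supplying the versionId permanently removes that specific version.
	if _, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String("doc-example-bucket"),
		Key:       aws.String("logs/2024-03-20.json"),
		VersionId: aws.String("example-version-id"), // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}
```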


@ -107,7 +107,7 @@ type DeleteObjectsInput struct {
// requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
// Path-style requests are not supported. Directory bucket names must be unique in
// the chosen Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an


@ -16,13 +16,17 @@ import (
// This operation is not supported by directory buckets. Bucket lifecycle
// configuration now supports specifying a lifecycle rule using an object key name
// prefix, one or more object tags, or a combination of both. Accordingly, this
// section describes the latest API. The response describes the new filter element
// that you can use to specify a filter to select a subset of objects to which the
// rule applies. If you are using a previous version of the lifecycle
// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
// . Returns the lifecycle configuration information set on the bucket. For
// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
// prefix, one or more object tags, object size, or any combination of these.
// Accordingly, this section describes the latest API. The previous version of the
// API supported filtering based only on an object key name prefix, which is
// supported for backward compatibility. For the related API description, see
// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
// . Accordingly, this section describes the latest API. The response describes the
// new filter element that you can use to specify a filter to select a subset of
// objects to which the rule applies. If you are using a previous version of the
// lifecycle configuration, it still works. For the earlier action, Returns the
// lifecycle configuration information set on the bucket. For information about
// lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
// . To use this operation, you must have permission to perform the
// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by
// default. The bucket owner can grant this permission to others. For more


@ -76,7 +76,7 @@ type GetBucketPolicyInput struct {
// format https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide Access points - When you use this API operation with


@ -132,7 +132,7 @@ type GetObjectInput struct {
// the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
// requests are not supported. Directory bucket names must be unique in the chosen
// Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an


@ -126,7 +126,7 @@ type GetObjectAttributesInput struct {
// requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
// Path-style requests are not supported. Directory bucket names must be unique in
// the chosen Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an


@ -22,9 +22,9 @@ import (
// you have permission to access it. If the bucket does not exist or you do not
// have permission to access it, the HEAD request returns a generic 400 Bad Request
// , 403 Forbidden or 404 Not Found code. A message body is not included, so you
// cannot determine the exception beyond these error codes. Directory buckets - You
// must make requests for this API operation to the Zonal endpoint. These endpoints
// support virtual-hosted-style requests in the format
// cannot determine the exception beyond these HTTP response codes. Directory
// buckets - You must make requests for this API operation to the Zonal endpoint.
// These endpoints support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket
@ -77,7 +77,7 @@ type HeadBucketInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
@ -127,7 +127,7 @@ type HeadBucketOutput struct {
// The name of the location where the bucket will be created. For directory
// buckets, the AZ ID of the Availability Zone where the bucket is created. An
// example AZ ID value is usw2-az2 . This functionality is only supported by
// example AZ ID value is usw2-az1 . This functionality is only supported by
// directory buckets.
BucketLocationName *string
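
The HeadBucket change above clarifies that failures come back as bare 400/403/404 status codes with no message body, so existence checks generally have to look at the HTTP status attached to the returned error. A sketch using the SDK's HTTP response error type; reading 404 as "does not exist" and 403 as "exists but not accessible" is the usual, though assumed, interpretation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
	})
	if err != nil {
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) {
			// No body is returned, so the status code is all there is to go on.
			switch respErr.HTTPStatusCode() {
			case 404:
				fmt.Println("bucket does not exist")
			case 403:
				fmt.Println("bucket exists, but access is denied")
			default:
				fmt.Println("request failed:", respErr)
			}
			return
		}
		log.Fatal(err)
	}
	fmt.Println("bucket exists and is accessible")
}
```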


@ -122,7 +122,7 @@ type HeadObjectInput struct {
// requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
// Path-style requests are not supported. Directory bucket names must be unique in
// the chosen Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an


@ -101,7 +101,7 @@ type ListMultipartUploadsInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
@ -258,8 +258,12 @@ type ListMultipartUploadsOutput struct {
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Upload ID after which listing began. This functionality is not supported for
// directory buckets.
// Together with key-marker, specifies the multipart upload after which listing
// should begin. If key-marker is not specified, the upload-id-marker parameter is
// ignored. Otherwise, any multipart uploads for a key equal to the key-marker
// might be included in the list only if they have an upload ID lexicographically
// greater than the specified upload-id-marker . This functionality is not
// supported for directory buckets.
UploadIdMarker *string
// Container for elements related to a particular multipart upload. A response can
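
The expanded UploadIdMarker description above says upload-id-marker is honored only when key-marker is also set. A short sketch of resuming a listing from a previous response's markers; the bucket and marker values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.ListMultipartUploads(ctx, &s3.ListMultipartUploadsInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
		// upload-id-marker is ignored unless key-marker is also set.
		KeyMarker:      aws.String("videos/intro.mp4"),  // placeholder key from a prior page
		UploadIdMarker: aws.String("example-upload-id"), // placeholder marker from a prior page
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range out.Uploads {
		fmt.Println(aws.ToString(u.Key), aws.ToString(u.UploadId))
	}
}
```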


@ -48,7 +48,7 @@ type ListObjectsInput struct {
// requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
// Path-style requests are not supported. Directory bucket names must be unique in
// the chosen Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
@ -141,7 +141,9 @@ type ListObjectsOutput struct {
// MaxKeys value.
Delimiter *string
// Encoding type used by Amazon S3 to encode object keys in the response.
// Encoding type used by Amazon S3 to encode object keys in the response. If using
// url , non-ASCII characters used in an object's key name will be URL encoded. For
// example, the object test_file(3).png will appear as test_file%283%29.png.
EncodingType types.EncodingType
// A flag that indicates whether Amazon S3 returned all of the results that


@ -84,7 +84,7 @@ type ListObjectsV2Input struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
@ -122,7 +122,9 @@ type ListObjectsV2Input struct {
// in the Amazon S3 User Guide.
Delimiter *string
// Encoding type used by Amazon S3 to encode object keys in the response.
// Encoding type used by Amazon S3 to encode object keys in the response. If using
// url , non-ASCII characters used in an object's key name will be URL encoded. For
// example, the object test_file(3).png will appear as test_file%283%29.png.
EncodingType types.EncodingType
// The account ID of the expected bucket owner. If the account ID that you provide
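
The new EncodingType wording above notes that with url encoding, special characters in key names come back percent-encoded (test_file(3).png becomes test_file%283%29.png). A sketch that requests URL encoding and decodes the keys; whether PathUnescape is the right inverse for your particular key space is an assumption to verify:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket:       aws.String("doc-example-bucket"), // placeholder
		EncodingType: types.EncodingTypeUrl,
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range page.Contents {
			// Keys arrive URL-encoded, e.g. test_file%283%29.png.
			key, err := url.PathUnescape(aws.ToString(obj.Key))
			if err != nil {
				log.Fatal(err)
			}
			fmt.Println(key)
		}
	}
}
```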


@ -86,7 +86,7 @@ type ListPartsInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
@ -225,9 +225,8 @@ type ListPartsOutput struct {
// returned as the object owner for all the parts.
Owner *types.Owner
// When a list is truncated, this element specifies the last part in the list, as
// well as the value to use for the part-number-marker request parameter in a
// subsequent request.
// Specifies the part after which listing should begin. Only parts with higher
// part numbers will be listed.
PartNumberMarker *string
// Container for elements related to a particular part. A response can contain


@ -22,21 +22,19 @@ import (
// lifecycle configuration. For information about lifecycle configuration, see
// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
// . Bucket lifecycle configuration now supports specifying a lifecycle rule using
// an object key name prefix, one or more object tags, or a combination of both.
// Accordingly, this section describes the latest API. The previous version of the
// API supported filtering based only on an object key name prefix, which is
// supported for backward compatibility. For the related API description, see
// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// an object key name prefix, one or more object tags, object size, or any
// combination of these. Accordingly, this section describes the latest API. The
// previous version of the API supported filtering based only on an object key name
// prefix, which is supported for backward compatibility. For the related API
// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// . Rules You specify the lifecycle configuration in your request body. The
// lifecycle configuration is specified as XML consisting of one or more rules. An
// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not
// adjustable. Each rule consists of the following:
//
// - A filter identifying a subset of objects to which the rule applies. The
// filter can be based on a key name prefix, object tags, or a combination of both.
//
// filter can be based on a key name prefix, object tags, object size, or any
// combination of these.
// - A status indicating whether the rule is in effect.
//
// - One or more lifecycle transition and expiration actions that you want
// Amazon S3 to perform on the objects identified by the filter. If the state of
// your bucket is versioning-enabled or versioning-suspended, you can have many
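
The lifecycle text above now lists object size as a filter dimension alongside key prefix and tags. A hedged sketch of one rule whose filter matches only objects above a size threshold; the rule name, expiration date, threshold, and bucket are illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	rule := types.LifecycleRule{
		ID:     aws.String("expire-large-objects"), // illustrative rule name
		Status: types.ExpirationStatusEnabled,
		// Object size is a filter dimension: this rule only matches
		// objects larger than 100 MiB.
		Filter: &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{
			Value: 100 * 1024 * 1024,
		},
		Expiration: &types.LifecycleExpiration{
			Date: aws.Time(time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)), // illustrative date
		},
	}

	_, err = client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{rule},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```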


@ -79,7 +79,7 @@ type PutBucketPolicyInput struct {
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide


@ -111,7 +111,7 @@ type PutObjectInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the


@ -17,7 +17,6 @@ import (
// This operation is not supported by directory buckets. Restores an archived copy
// of an object back into Amazon S3 This functionality is not supported for Amazon
// S3 on Outposts. This action performs the following types of requests:
// - select - Perform a select query on an archived object
// - restore an archive - Restore an archived object
//
// For more information about the S3 structure in the request body, see the
@ -28,36 +27,6 @@ import (
// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
// in the Amazon S3 User Guide
//
// Define the SQL expression for the SELECT type of restoration for your query in
// the request body's SelectParameters structure. You can use expressions like the
// following examples.
// - The following expression returns all records from the specified object.
// SELECT * FROM Object
// - Assuming that you are not using any headers for data stored in the object,
// you can specify columns with positional headers. SELECT s._1, s._2 FROM
// Object s WHERE s._3 > 100
// - If you have headers and you set the fileHeaderInfo in the CSV structure in
// the request body to USE , you can specify headers in the query. (If you set
// the fileHeaderInfo field to IGNORE , the first row is skipped for the query.)
// You cannot mix ordinal positions with header column names. SELECT s.Id,
// s.FirstName, s.SSN FROM S3Object s
//
// When making a select request, you can also do the following:
// - To expedite your queries, specify the Expedited tier. For more information
// about tiers, see "Restoring Archives," later in this topic.
// - Specify details about the data serialization format of both the input
// object that is being queried and the serialization of the CSV-encoded query
// results.
//
// The following are additional important facts about the select feature:
// - The output results are new Amazon S3 objects. Unlike archive retrievals,
// they are stored until explicitly deleted-manually or through a lifecycle
// configuration.
// - You can issue more than one select request on the same Amazon S3 object.
// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.
// - Amazon S3 accepts a select request even if the object has already been
// restored. A select request doesnt return error response 409 .
//
// Permissions To use this operation, you must have permissions to perform the
// s3:RestoreObject action. The bucket owner has this permission by default and can
// grant this permission to others. For more information about permissions, see
@ -141,8 +110,7 @@ import (
//
// - Code: RestoreAlreadyInProgress
//
// - Cause: Object restore is already in progress. (This error does not apply to
// SELECT type requests.)
// - Cause: Object restore is already in progress.
//
// - HTTP Status Code: 409 Conflict
//


@ -132,7 +132,7 @@ type UploadPartInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the


@ -53,7 +53,7 @@ import (
// - If the source object is in a general purpose bucket, you must have the
// s3:GetObject permission to read the source object that is being copied.
// - If the destination bucket is a general purpose bucket, you must have the
// s3:PubObject permission to write the object copy to the destination bucket.
// s3:PutObject permission to write the object copy to the destination bucket.
// For information about permissions required to use the multipart upload API, see
// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// in the Amazon S3 User Guide.
@ -124,7 +124,7 @@ type UploadPartCopyInput struct {
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the


@ -1352,6 +1352,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {
@ -1395,6 +1408,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {
@ -1446,6 +1472,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {
@ -1491,6 +1530,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {
@ -3755,6 +3807,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {
@ -3800,6 +3865,19 @@ func (r *resolver) ResolveEndpoint(
Properties: func() smithy.Properties {
var out smithy.Properties
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
{
SchemeID: "aws.auth#sigv4a",
SignerProperties: func() smithy.Properties {
var sp smithy.Properties
smithyhttp.SetDisableDoubleEncoding(&sp, true)
smithyhttp.SetSigV4SigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts")
smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"})
return sp
}(),
},
{
SchemeID: "aws.auth#sigv4",
SignerProperties: func() smithy.Properties {


@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.51.4"
const goModuleVersion = "1.52.1"


@ -1280,8 +1280,15 @@ type ExistingObjectReplication struct {
noSmithyDocumentSerde
}
// Specifies the Amazon S3 object key name to filter on and whether to filter on
// the suffix or prefix of the key name.
// Specifies the Amazon S3 object key name to filter on. An object key name is the
// name assigned to an object in your Amazon S3 bucket. You specify whether to
// filter on the suffix or prefix of the object key name. A prefix is a specific
// string of characters at the beginning of an object key name, which you can use
// to organize objects. For example, you can start the key names of related objects
// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to
// find objects in a bucket with key names that have the same prefix. A suffix is
// similar to a prefix, but it is at the end of the object key name instead of at
// the beginning.
type FilterRule struct {
// The object key name prefix or suffix identifying one or more objects to which
@ -1783,7 +1790,9 @@ type LifecycleRuleAndOperator struct {
}
// The Filter is used to identify objects that a Lifecycle Rule applies to. A
// Filter must have exactly one of Prefix , Tag , or And specified.
// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan ,
// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the
// Lifecycle Rule applies to all objects in the bucket.
//
// The following types satisfy this interface:
//
@ -1855,8 +1864,8 @@ func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {}
type LocationInfo struct {
// The name of the location where the bucket will be created. For directory
// buckets, the AZ ID of the Availability Zone where the bucket will be created. An
// example AZ ID value is usw2-az2 .
// buckets, the name of the location is the AZ ID of the Availability Zone where
// the bucket will be created. An example AZ ID value is usw2-az1 .
Name *string
// The type of location where the bucket will be created.
@ -3137,8 +3146,8 @@ type ServerSideEncryptionByDefault struct {
// Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
// KMS key ID to use for the default encryption. This parameter is allowed if and
// only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias,
// or the Amazon Resource Name (ARN) of the KMS key.
// only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key
// ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.
// - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
// - Key ARN:
// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
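
The ServerSideEncryptionByDefault change above extends the KMS key ID guidance to aws:kms:dsse. A sketch of setting DSSE-KMS as the bucket default; the bucket name is a placeholder and the key ARN is the example ARN from the doc text above:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("doc-example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{
				{
					ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
						// KMSMasterKeyID is allowed with aws:kms or aws:kms:dsse.
						SSEAlgorithm:   types.ServerSideEncryptionAwsKmsDsse,
						KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"), // example ARN from the doc above
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```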

vendor/modules.txt

@ -266,7 +266,7 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
github.com/aws/aws-sdk-go-v2/service/internal/s3shared
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
# github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4
# github.com/aws/aws-sdk-go-v2/service/s3 v1.52.1
## explicit; go 1.20
github.com/aws/aws-sdk-go-v2/service/s3
github.com/aws/aws-sdk-go-v2/service/s3/internal/arn