chore(deps): bump cloud.google.com/go/storage from 1.35.1 to 1.36.0 (#2913)

Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.35.1 to 1.36.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2023-12-19 08:34:19 -08:00 committed by GitHub
parent 701bde3525
commit d42538bbab
10 changed files with 274 additions and 84 deletions

go.mod

@@ -3,7 +3,7 @@ module github.com/GoogleContainerTools/kaniko
 go 1.19

 require (
-	cloud.google.com/go/storage v1.35.1
+	cloud.google.com/go/storage v1.36.0
 	github.com/Azure/azure-storage-blob-go v0.14.0
 	// This docker-credential-gcr dependency version is actually the same as v2.1.8.
 	// See https://github.com/GoogleCloudPlatform/docker-credential-gcr/issues/128

go.sum

@@ -44,8 +44,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
-cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=

vendor/cloud.google.com/go/storage/CHANGES.md

@@ -1,6 +1,19 @@
 # Changes

+## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14)
+
+### Features
+
+* **storage:** Add object retention feature ([#9072](https://github.com/googleapis/google-cloud-go/issues/9072)) ([16ecfd1](https://github.com/googleapis/google-cloud-go/commit/16ecfd150ff1982f03d207a80a82e934d1013874))
+
+### Bug Fixes
+
+* **storage:** Do not inhibit the dead code elimination. ([#8543](https://github.com/googleapis/google-cloud-go/issues/8543)) ([ca2493f](https://github.com/googleapis/google-cloud-go/commit/ca2493f43c299bbaed5f7e5b70f66cc763ff9802))
+* **storage:** Set flush and get_state to false on the last write in gRPC ([#9013](https://github.com/googleapis/google-cloud-go/issues/9013)) ([c1e9fe5](https://github.com/googleapis/google-cloud-go/commit/c1e9fe5f4166a71e55814ccf126926ec0e0e7945))
+
 ## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09)
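The headline change in v1.36.0 is the object retention feature. A minimal sketch of how the new surface fits together, using only the API added in the diff below; the client setup is standard and the bucket, project, and object names are placeholders. Note that retention cannot be configured over the gRPC transport in this release.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Object retention must be enabled when the bucket is created; it cannot
	// be turned on for an existing bucket. Names below are placeholders.
	bkt := client.Bucket("example-bucket").SetObjectRetention(true)
	if err := bkt.Create(ctx, "example-project", nil); err != nil {
		log.Fatal(err)
	}

	// Write an object so there is something to attach a retention config to.
	w := bkt.Object("example-object").NewWriter(ctx)
	if _, err := w.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	// Set an Unlocked retention configuration on the object.
	obj := bkt.Object("example-object")
	if _, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{
			Mode:        "Unlocked",
			RetainUntil: time.Now().Add(24 * time.Hour),
		},
	}); err != nil {
		log.Fatal(err)
	}

	// Shortening or removing an Unlocked policy requires the override handle;
	// an empty (non-nil) ObjectRetention removes the configuration.
	if _, err := obj.OverrideUnlockedRetention(true).Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{},
	}); err != nil {
		log.Fatal(err)
	}
}
```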

vendor/cloud.google.com/go/storage/bucket.go

@@ -41,13 +41,14 @@ import (

 // BucketHandle provides operations on a Google Cloud Storage bucket.
 // Use Client.Bucket to get a handle.
 type BucketHandle struct {
-	c                *Client
-	name             string
-	acl              ACLHandle
-	defaultObjectACL ACLHandle
-	conds            *BucketConditions
-	userProject      string // project for Requester Pays buckets
-	retry            *retryConfig
+	c                     *Client
+	name                  string
+	acl                   ACLHandle
+	defaultObjectACL      ACLHandle
+	conds                 *BucketConditions
+	userProject           string // project for Requester Pays buckets
+	retry                 *retryConfig
+	enableObjectRetention *bool
 }

 // Bucket returns a BucketHandle, which provides operations on the named bucket.
@@ -85,7 +86,8 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	defer func() { trace.EndSpan(ctx, err) }()

 	o := makeStorageOpts(true, b.retry, b.userProject)
-	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
+
+	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil {
 		return err
 	}
 	return nil
@@ -462,6 +464,15 @@ type BucketAttrs struct {
 	// allows for the automatic selection of the best storage class
 	// based on object access patterns.
 	Autoclass *Autoclass
+
+	// ObjectRetentionMode reports whether individual objects in the bucket can
+	// be configured with a retention policy. An empty value means that object
+	// retention is disabled.
+	// This field is read-only. Object retention can be enabled only by creating
+	// a bucket with SetObjectRetention set to true on the BucketHandle. It
+	// cannot be modified once the bucket is created.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	ObjectRetentionMode string
 }

 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -757,6 +768,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &BucketAttrs{
 		Name:     b.Name,
 		Location: b.Location,
@@ -771,6 +783,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 		RequesterPays:       b.Billing != nil && b.Billing.RequesterPays,
 		Lifecycle:           toLifecycle(b.Lifecycle),
 		RetentionPolicy:     rp,
+		ObjectRetentionMode: toBucketObjectRetention(b.ObjectRetention),
 		CORS:                toCORS(b.Cors),
 		Encryption:          toBucketEncryption(b.Encryption),
 		Logging:             toBucketLogging(b.Logging),
@@ -1348,6 +1361,17 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
 	return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
 }

+// SetObjectRetention returns a new BucketHandle that will enable object retention
+// on bucket creation. To enable object retention, you must use the returned
+// handle to create the bucket. This has no effect on an already existing bucket.
+// ObjectRetention is not enabled by default.
+// ObjectRetention cannot be configured through the gRPC API.
+func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle {
+	b2 := *b
+	b2.enableObjectRetention = &enable
+	return &b2
+}
+
 // applyBucketConds modifies the provided call using the conditions in conds.
 // call is something that quacks like a *raw.WhateverCall.
 func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@@ -1360,11 +1384,11 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
 	cval := reflect.ValueOf(call)
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1447,6 +1471,13 @@ func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *Retention
 	}
 }

+func toBucketObjectRetention(or *raw.BucketObjectRetention) string {
+	if or == nil {
+		return ""
+	}
+	return or.Mode
+}
+
 func toRawCORS(c []CORS) []*raw.BucketCors {
 	var out []*raw.BucketCors
 	for _, v := range c {
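Whether a bucket accepts per-object retention can be checked through the new read-only BucketAttrs.ObjectRetentionMode field added above. A small sketch, assuming an existing *storage.Client; the bucket name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

// reportRetentionMode prints the read-only ObjectRetentionMode of a bucket;
// the field is empty when object retention is disabled for the bucket.
func reportRetentionMode(ctx context.Context, client *storage.Client, bucket string) {
	attrs, err := client.Bucket(bucket).Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if attrs.ObjectRetentionMode == "" {
		fmt.Printf("bucket %s does not support per-object retention\n", bucket)
		return
	}
	fmt.Printf("bucket %s object retention mode: %s\n", bucket, attrs.ObjectRetentionMode)
}

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	reportRetentionMode(ctx, client, "example-bucket") // placeholder bucket name
}
```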

vendor/cloud.google.com/go/storage/client.go

@@ -44,7 +44,7 @@ type storageClient interface {

 	// Top-level methods.

 	GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
-	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
+	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error)
 	ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
 	Close() error
@@ -60,7 +60,7 @@ type storageClient interface {
 	DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
 	GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
-	UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
+	UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)

 	// Default Object ACL methods.
@@ -291,6 +291,15 @@ type newRangeReaderParams struct {
 	readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
 }

+type updateObjectParams struct {
+	bucket, object    string
+	uattrs            *ObjectAttrsToUpdate
+	gen               int64
+	encryptionKey     []byte
+	conds             *Conditions
+	overrideRetention *bool
+}
+
 type composeObjectRequest struct {
 	dstBucket string
 	dstObject destinationObject

vendor/cloud.google.com/go/storage/grpc_client.go

@@ -152,7 +152,12 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return resp.EmailAddress, err
 }

-func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
+	if enableObjectRetention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
+
 	s := callSettings(c.settings, opts...)
 	b := attrs.toProtoBucket()
 	b.Project = toProjectResource(project)
@@ -507,25 +512,30 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
 	return attrs, err
 }

-func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
+	if params.overrideRetention != nil || uattrs.Retention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
 	s := callSettings(c.settings, opts...)
-	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object)
+	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, params.bucket), params.object)
 	// For Update, generation is passed via the object message rather than a field on the request.
-	if gen >= 0 {
-		o.Generation = gen
+	if params.gen >= 0 {
+		o.Generation = params.gen
 	}
 	req := &storagepb.UpdateObjectRequest{
 		Object:        o,
 		PredefinedAcl: uattrs.PredefinedACL,
 	}
-	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, conds, req); err != nil {
+	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, params.conds, req); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
 		ctx = setUserProjectMetadata(ctx, s.userProject)
 	}
-	if encryptionKey != nil {
-		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
+	if params.encryptionKey != nil {
+		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
 	}

 	fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
@@ -739,7 +749,8 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
 	}
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -769,7 +780,8 @@ func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object
 	acl = append(attrs.ACL, aclRule)
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -1049,6 +1061,13 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
 			return
 		}

+		if params.attrs.Retention != nil {
+			// TO-DO: remove once ObjectRetention is available - see b/308194853
+			err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+			errorf(err)
+			pr.CloseWithError(err)
+			return
+		}
 		// The chunk buffer is full, but there is no end in sight. This
 		// means that either:
 		// 1. A resumable upload will need to be used to send
@@ -1629,8 +1648,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 		},
 		WriteOffset: writeOffset,
 		FinishWrite: lastWriteOfEntireObject,
-		Flush:       remainingDataFitsInSingleReq,
-		StateLookup: remainingDataFitsInSingleReq,
+		Flush:       remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
+		StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
 	}

 	// Open a new stream if necessary and set the first_message field on
@@ -1723,32 +1742,33 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 			return nil, writeOffset, nil
 		}

-		// Done sending data (remainingDataFitsInSingleReq should == true if we
-		// reach this code). Receive from the stream to confirm the persisted data.
-		resp, err := w.stream.Recv()
-
-		// Retriable errors mean we should start over and attempt to
-		// resend the entire buffer via a new stream.
-		// If not retriable, falling through will return the error received
-		// from closing the stream.
-		if shouldRetry(err) {
-			writeOffset, err = w.determineOffset(start)
-			if err != nil {
-				return nil, 0, err
-			}
-			sent = int(writeOffset) - int(start)
-
-			// Drop the stream reference as a new one will need to be created.
-			w.stream = nil
-
-			continue
-		}
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// Confirm the persisted data if we have not finished uploading the object.
-		if !lastWriteOfEntireObject {
+		// Done sending the data in the buffer (remainingDataFitsInSingleReq
+		// should == true if we reach this code).
+		// If we are done sending the whole object, close the stream and get the final
+		// object. Otherwise, receive from the stream to confirm the persisted data.
+		if !lastWriteOfEntireObject {
+			resp, err := w.stream.Recv()
+
+			// Retriable errors mean we should start over and attempt to
+			// resend the entire buffer via a new stream.
+			// If not retriable, falling through will return the error received
+			// from closing the stream.
+			if shouldRetry(err) {
+				writeOffset, err = w.determineOffset(start)
+				if err != nil {
+					return nil, 0, err
+				}
+				sent = int(writeOffset) - int(start)
+
+				// Drop the stream reference as a new one will need to be created.
+				w.stream = nil
+
+				continue
+			}
+			if err != nil {
+				return nil, 0, err
+			}
+
 			if resp.GetPersistedSize() != writeOffset {
 				// Retry if not all bytes were persisted.
 				writeOffset = resp.GetPersistedSize()
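Object retention is deliberately rejected on the gRPC transport in this release: the client fails fast with codes.Unimplemented instead of sending the request. A hedged sketch of what a caller would observe, assuming the experimental NewGRPCClient constructor (not part of this diff) and placeholder names:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	ctx := context.Background()

	// NewGRPCClient routes requests over the gRPC API instead of JSON/HTTP.
	client, err := storage.NewGRPCClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Bucket and object names are placeholders. Because retention is not
	// implemented for the gRPC transport in this release, the update below is
	// expected to return codes.Unimplemented before any request is sent.
	obj := client.Bucket("example-bucket").Object("example-object")
	_, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{
			Mode:        "Unlocked",
			RetainUntil: time.Now().Add(time.Hour),
		},
	})
	if status.Code(err) == codes.Unimplemented {
		fmt.Println("object retention is not supported over gRPC:", err)
	}
}
```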

vendor/cloud.google.com/go/storage/http_client.go

@@ -159,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return res.EmailAddress, nil
 }

-func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	var bkt *raw.Bucket
 	if attrs != nil {
@@ -181,6 +181,9 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
 	if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
 	}
+	if enableObjectRetention != nil {
+		req.EnableObjectRetention(*enableObjectRetention)
+	}
 	var battrs *BucketAttrs
 	err := run(ctx, func(ctx context.Context) error {
 		b, err := req.Context(ctx).Do()
@@ -431,7 +434,8 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string
 	return newObject(obj), nil
 }

-func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
 	s := callSettings(c.settings, opts...)

 	var attrs ObjectAttrs
@@ -496,11 +500,21 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 		// we don't append to nullFields here.
 		forceSendFields = append(forceSendFields, "Acl")
 	}
-	rawObj := attrs.toRawObject(bucket)
+	if uattrs.Retention != nil {
+		// For ObjectRetention it's an error to send empty fields.
+		// Instead we send a null as the user's intention is to remove.
+		if uattrs.Retention.Mode == "" && uattrs.Retention.RetainUntil.IsZero() {
+			nullFields = append(nullFields, "Retention")
+		} else {
+			attrs.Retention = uattrs.Retention
+			forceSendFields = append(forceSendFields, "Retention")
+		}
+	}
+	rawObj := attrs.toRawObject(params.bucket)
 	rawObj.ForceSendFields = forceSendFields
 	rawObj.NullFields = nullFields
-	call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full")
-	if err := applyConds("Update", gen, conds, call); err != nil {
+	call := c.raw.Objects.Patch(params.bucket, params.object, rawObj).Projection("full")
+	if err := applyConds("Update", params.gen, params.conds, call); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
@@ -509,9 +523,14 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 	if uattrs.PredefinedACL != "" {
 		call.PredefinedAcl(uattrs.PredefinedACL)
 	}
-	if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil {
+	if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil {
 		return nil, err
 	}
+
+	if params.overrideRetention != nil {
+		call.OverrideUnlockedRetention(*params.overrideRetention)
+	}
+
 	var obj *raw.Object
 	var err error
 	err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent)

vendor/cloud.google.com/go/storage/internal/version.go

@@ -15,4 +15,4 @@
 package internal

 // Version is the current tagged release of the library.
-const Version = "1.35.1"
+const Version = "1.36.0"

vendor/cloud.google.com/go/storage/storage.go

@@ -879,16 +879,17 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {

 // ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
 // Use BucketHandle.Object to get a handle.
 type ObjectHandle struct {
-	c              *Client
-	bucket         string
-	object         string
-	acl            ACLHandle
-	gen            int64 // a negative value indicates latest
-	conds          *Conditions
-	encryptionKey  []byte // AES-256 key
-	userProject    string // for requester-pays buckets
-	readCompressed bool   // Accept-Encoding: gzip
-	retry          *retryConfig
+	c                 *Client
+	bucket            string
+	object            string
+	acl               ACLHandle
+	gen               int64 // a negative value indicates latest
+	conds             *Conditions
+	encryptionKey     []byte // AES-256 key
+	userProject       string // for requester-pays buckets
+	readCompressed    bool   // Accept-Encoding: gzip
+	retry             *retryConfig
+	overrideRetention *bool
 }

 // ACL provides access to the object's access control list.
@@ -958,7 +959,15 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
 	}
 	isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
 	opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
-	return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
+	return o.c.tc.UpdateObject(ctx,
+		&updateObjectParams{
+			bucket:            o.bucket,
+			object:            o.object,
+			uattrs:            &uattrs,
+			gen:               o.gen,
+			encryptionKey:     o.encryptionKey,
+			conds:             o.conds,
+			overrideRetention: o.overrideRetention}, opts...)
 }

 // BucketName returns the name of the bucket.
@@ -973,16 +982,19 @@ func (o *ObjectHandle) ObjectName() string {

 // ObjectAttrsToUpdate is used to update the attributes of an object.
 // Only fields set to non-nil values will be updated.
-// For all fields except CustomTime, set the field to its zero value to delete
-// it. CustomTime cannot be deleted or changed to an earlier time once set.
+// For all fields except CustomTime and Retention, set the field to its zero
+// value to delete it. CustomTime cannot be deleted or changed to an earlier
+// time once set. Retention can be deleted (only if the Mode is Unlocked) by
+// setting it to an empty value (not nil).
 //
-// For example, to change ContentType and delete ContentEncoding and
-// Metadata, use
+// For example, to change ContentType and delete ContentEncoding, Metadata and
+// Retention, use:
 //
 //	ObjectAttrsToUpdate{
 //		ContentType:     "text/html",
 //		ContentEncoding: "",
 //		Metadata:        map[string]string{},
+//		Retention:       &ObjectRetention{},
 //	}
 type ObjectAttrsToUpdate struct {
 	EventBasedHold    optional.Bool
@@ -999,6 +1011,12 @@ type ObjectAttrsToUpdate struct {
 	// If not empty, applies a predefined set of access controls. ACL must be nil.
 	// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
 	PredefinedACL string
+
+	// Retention contains the retention configuration for this object.
+	// Operations other than setting the retention for the first time or
+	// extending the RetainUntil time on the object retention must be done
+	// on an ObjectHandle with OverrideUnlockedRetention set to true.
+	Retention *ObjectRetention
 }

 // Delete deletes the single specified object.
@@ -1020,6 +1038,17 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
 	return &o2
 }

+// OverrideUnlockedRetention provides an option for overriding an Unlocked
+// Retention policy. This must be set to true in order to change a policy
+// from Unlocked to Locked, to set it to null, or to reduce its
+// RetainUntil attribute. It is not required for setting the ObjectRetention for
+// the first time nor for extending the RetainUntil time.
+func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle {
+	o2 := *o
+	o2.overrideRetention = &override
+	return &o2
+}
+
 // NewWriter returns a storage Writer that writes to the GCS object
 // associated with this ObjectHandle.
 //
@@ -1109,6 +1138,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
 		Acl:        toRawObjectACL(o.ACL),
 		Metadata:   o.Metadata,
 		CustomTime: ct,
+		Retention:  o.Retention.toRawObjectRetention(),
 	}
 }

@@ -1344,6 +1374,42 @@ type ObjectAttrs struct {
 	// For non-composite objects, the value will be zero.
 	// This field is read-only.
 	ComponentCount int64
+
+	// Retention contains the retention configuration for this object.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	Retention *ObjectRetention
+}
+
+// ObjectRetention contains the retention configuration for this object.
+type ObjectRetention struct {
+	// Mode is the retention policy's mode on this object. Valid values are
+	// "Locked" and "Unlocked".
+	// Locked retention policies cannot be changed. Unlocked policies require an
+	// override to change.
+	Mode string
+
+	// RetainUntil is the time this object will be retained until.
+	RetainUntil time.Time
+}
+
+func (r *ObjectRetention) toRawObjectRetention() *raw.ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &raw.ObjectRetention{
+		Mode:            r.Mode,
+		RetainUntilTime: r.RetainUntil.Format(time.RFC3339),
+	}
+}
+
+func toObjectRetention(r *raw.ObjectRetention) *ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &ObjectRetention{
+		Mode:        r.Mode,
+		RetainUntil: convertTime(r.RetainUntilTime),
+	}
 }

 // convertTime converts a time in RFC3339 format to time.Time.
@@ -1415,6 +1481,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
 		Etag:           o.Etag,
 		CustomTime:     convertTime(o.CustomTime),
 		ComponentCount: o.ComponentCount,
+		Retention:      toObjectRetention(o.Retention),
 	}
 }

@@ -1587,6 +1654,7 @@ var attrToFieldMap = map[string]string{
 	"Etag":           "etag",
 	"CustomTime":     "customTime",
 	"ComponentCount": "componentCount",
+	"Retention":      "retention",
 }

 // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1621,6 +1689,7 @@ var attrToProtoFieldMap = map[string]string{
 	"ComponentCount": "component_count",
 	// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
 	// "MediaLink": "mediaLink",
+	// TODO: add object retention - b/308194853
 }

 // SetAttrSelection makes the query populate only specific attributes of
@@ -1806,7 +1875,7 @@ func (c *Conditions) isMetagenerationValid() bool {
 func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
 	cval := reflect.ValueOf(call)
 	if gen >= 0 {
-		if !setConditionField(cval, "Generation", gen) {
+		if !setGeneration(cval, gen) {
 			return fmt.Errorf("storage: %s: generation not supported", method)
 		}
 	}
@@ -1818,25 +1887,25 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
 	}
 	switch {
 	case conds.GenerationMatch != 0:
-		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
+		if !setIfGenerationMatch(cval, conds.GenerationMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
 		}
 	case conds.GenerationNotMatch != 0:
-		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
+		if !setIfGenerationNotMatch(cval, conds.GenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
 		}
 	case conds.DoesNotExist:
-		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
+		if !setIfGenerationMatch(cval, int64(0)) {
 			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
 		}
 	}
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1897,16 +1966,45 @@ func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.Rewrite
 	return nil
 }

-// setConditionField sets a field on a *raw.WhateverCall.
+// setGeneration sets Generation on a *raw.WhateverCall.
 // We can't use anonymous interfaces because the return type is
 // different, since the field setters are builders.
-func setConditionField(call reflect.Value, name string, value interface{}) bool {
-	m := call.MethodByName(name)
-	if !m.IsValid() {
-		return false
-	}
-	m.Call([]reflect.Value{reflect.ValueOf(value)})
-	return true
+// We also make sure to supply a compile-time constant to MethodByName;
+// otherwise, the Go Linker will disable dead code elimination, leading
+// to larger binaries for all packages that import storage.
+func setGeneration(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("Generation"), value)
+}
+
+// setIfGenerationMatch sets IfGenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationMatch"), value)
+}
+
+// setIfGenerationNotMatch sets IfGenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationNotMatch"), value)
+}
+
+// setIfMetagenerationMatch sets IfMetagenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationMatch"), value)
+}
+
+// setIfMetagenerationNotMatch sets IfMetagenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
+}
+
+func setCondition(setter reflect.Value, value interface{}) bool {
+	if setter.IsValid() {
+		setter.Call([]reflect.Value{reflect.ValueOf(value)})
+	}
+	return setter.IsValid()
 }

 // Retryer returns an object handle that is configured with custom retry
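The setConditionField split above is the "do not inhibit the dead code elimination" fix from the changelog: reflect.Value.MethodByName disables the linker's dead code elimination unless it is called with a compile-time constant string, so each condition now gets its own tiny wrapper with a constant method name. A standalone sketch of the pattern; fakeCall and the helper names are illustrative stand-ins, not part of the library:

```go
package main

import (
	"fmt"
	"reflect"
)

// fakeCall stands in for the generated *raw.WhateverCall builder types,
// whose setters return the receiver so calls can be chained.
type fakeCall struct{ gen int64 }

func (c *fakeCall) Generation(v int64) *fakeCall { c.gen = v; return c }

// setCondition invokes a setter that the caller already looked up. The lookup
// happens at each call site with a compile-time constant name, e.g.
// cval.MethodByName("Generation"), which keeps the linker's dead code
// elimination enabled; passing a variable method name instead would force the
// linker to retain every method in the program.
func setCondition(setter reflect.Value, value interface{}) bool {
	if setter.IsValid() {
		setter.Call([]reflect.Value{reflect.ValueOf(value)})
	}
	return setter.IsValid()
}

func setGeneration(cval reflect.Value, value interface{}) bool {
	return setCondition(cval.MethodByName("Generation"), value)
}

func main() {
	call := &fakeCall{}
	ok := setGeneration(reflect.ValueOf(call), int64(42))
	fmt.Println(ok, call.gen) // true 42
}
```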
// Retryer returns an object handle that is configured with custom retry // Retryer returns an object handle that is configured with custom retry

vendor/modules.txt

@@ -14,7 +14,7 @@ cloud.google.com/go/compute/metadata
 ## explicit; go 1.19
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
-# cloud.google.com/go/storage v1.35.1
+# cloud.google.com/go/storage v1.36.0
 ## explicit; go 1.19
 cloud.google.com/go/storage
 cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal