Bump github.com/aws/aws-sdk-go from 1.43.36 to 1.44.24 (#2111)
Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.36 to 1.44.24.
- [Release notes](https://github.com/aws/aws-sdk-go/releases)
- [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.36...v1.44.24)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

parent c7841d7653
commit 3d2c088655

go.mod (2 changed lines)

@@ -13,7 +13,7 @@ replace (
 require (
 	cloud.google.com/go/storage v1.22.0
 	github.com/Azure/azure-storage-blob-go v0.14.0
-	github.com/aws/aws-sdk-go v1.43.36
+	github.com/aws/aws-sdk-go v1.44.24
 	github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795
 	github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21
 	github.com/containerd/cgroups v1.0.3 // indirect

go.sum (4 changed lines)

@@ -276,8 +276,8 @@ github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi
 github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.43.36 h1:8a+pYKNT7wSxUy3fi5dSqKQdfmit7SYGg5fv4zf+WuA=
-github.com/aws/aws-sdk-go v1.43.36/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.24 h1:3nOkwJBJLiGBmJKWp3z0utyXuBkxyGkRRwWjrTItJaY=
+github.com/aws/aws-sdk-go v1.44.24/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
 github.com/aws/aws-sdk-go-v2 v1.14.0 h1:IzSYBJHu0ZdUi27kIW6xVrs0eSxI4AzwbenzfXhhVs4=
 github.com/aws/aws-sdk-go-v2 v1.14.0/go.mod h1:ZA3Y8V0LrlWj63MQAnRHgKf/5QB//LSZCPNWlWrNGLU=

(One file's diff is suppressed because it is too large.)

@@ -330,6 +330,9 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
 // WithSetRequestHeaders updates the operation request's HTTP header to contain
 // the header key value pairs provided. If the header key already exists in the
 // request's HTTP header set, the existing value(s) will be replaced.
+//
+// Header keys added will be added as canonical format with title casing
+// applied via http.Header.Set method.
 func WithSetRequestHeaders(h map[string]string) Option {
 	return withRequestHeader(h).SetRequestHeaders
 }

@@ -338,6 +341,6 @@ type withRequestHeader map[string]string
 
 func (h withRequestHeader) SetRequestHeaders(r *Request) {
 	for k, v := range h {
-		r.HTTPRequest.Header[k] = []string{v}
+		r.HTTPRequest.Header.Set(k, v)
 	}
 }
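
The second hunk above switches SetRequestHeaders from writing the header map directly to calling http.Header.Set, which (as the new doc comment says) stores keys in canonical title-cased form. A minimal standalone sketch of the difference, using only the Go standard library rather than the SDK:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	direct := http.Header{}
	// Writing the map directly keeps the key exactly as given, so a later
	// Get (which canonicalizes its lookup key) does not find the value.
	direct["x-custom-id"] = []string{"123"}
	fmt.Printf("direct:    %q\n", direct.Get("X-Custom-Id")) // ""

	canonical := http.Header{}
	// Header.Set canonicalizes the key to "X-Custom-Id" before storing it,
	// matching how the rest of net/http looks headers up.
	canonical.Set("x-custom-id", "123")
	fmt.Printf("canonical: %q\n", canonical.Get("X-Custom-Id")) // "123"
}
```

So after this bump, headers passed through request.WithSetRequestHeaders end up under canonical keys instead of whatever casing the caller happened to use.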

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.43.36"
+const SDKVersion = "1.44.24"
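
SDKName and SDKVersion are exported constants of the aws package, so a quick way to confirm which vendored SDK a binary was built against is to print them; a minimal sketch (as far as I know, the SDK's default User-Agent handler is assembled from these same constants, so the bump also shows up there):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// After this bump this prints: aws-sdk-go 1.44.24
	fmt.Println(aws.SDKName, aws.SDKVersion)
}
```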

@@ -8113,8 +8113,9 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
 // Rules
 //
 // You specify the lifecycle configuration in your request body. The lifecycle
-// configuration is specified as XML consisting of one or more rules. Each rule
-// consists of the following:
+// configuration is specified as XML consisting of one or more rules. An Amazon
+// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not
+// adjustable. Each rule consists of the following:
 //
 // * Filter identifying a subset of objects to which the rule applies. The
 // filter can be based on a key name prefix, object tags, or a combination

@@ -10918,9 +10919,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
 // Part numbers can be any number from 1 to 10,000, inclusive. A part number
 // uniquely identifies a part and also defines its position within the object
 // being created. If you upload a new part using the same part number that was
-// used with a previous part, the previously uploaded part is overwritten. Each
-// part must be at least 5 MB in size, except the last part. There is no size
-// limit on the last part of your multipart upload.
+// used with a previous part, the previously uploaded part is overwritten.
+//
+// For information about maximum and minimum part sizes and other multipart
+// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide.
 //
 // To ensure that data is not corrupted when traversing the network, specify
 // the Content-MD5 header in the upload part request. Amazon S3 checks the part

@@ -11068,8 +11071,8 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
 // your request and a byte range by adding the request header x-amz-copy-source-range
 // in your request.
 //
-// The minimum allowable part size for a multipart upload is 5 MB. For more
-// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
+// For information about maximum and minimum part sizes and other multipart
+// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
 // in the Amazon S3 User Guide.
 //
 // Instead of using an existing object as part data, you might use the UploadPart

@@ -29347,9 +29350,9 @@ type NoncurrentVersionExpiration struct {
 	NewerNoncurrentVersions *int64 `type:"integer"`
 
 	// Specifies the number of days an object is noncurrent before Amazon S3 can
-	// perform the associated action. For information about the noncurrent days
-	// calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+	// perform the associated action. The value must be a non-zero positive integer.
+	// For information about the noncurrent days calculations, see How Amazon S3
+	// Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
 	// in the Amazon S3 User Guide.
 	NoncurrentDays *int64 `type:"integer"`
 }

@@ -29662,7 +29665,9 @@ type Object struct {
 	//
 	// * If an object is created by either the Multipart Upload or Part Copy
 	// operation, the ETag is not an MD5 digest, regardless of the method of
-	// encryption.
+	// encryption. If an object is larger than 16 MB, the Amazon Web Services
+	// Management Console will upload or copy that object as a Multipart Upload,
+	// and therefore the ETag will not be an MD5 digest.
 	ETag *string `type:"string"`
 
 	// The name that you assign to an object. You use the object key to retrieve
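
The UploadPart and UploadPartCopy doc updates above point at the consolidated multipart upload limits page: at most 10,000 parts per object and a 5 MB minimum part size for every part except the last. A hedged sketch of where those limits surface when using this SDK's high-level uploader; the region, bucket, key, and file path below are placeholders, not values from this repository:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"), // placeholder region
	}))

	// The uploader splits the body into parts. PartSize must be at least
	// s3manager.MinUploadPartSize (5 MB), and an object can consist of at
	// most s3manager.MaxUploadParts (10,000) parts.
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = 16 * 1024 * 1024 // 16 MiB per part
	})

	f, err := os.Open("large-file.bin") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		Key:    aws.String("large-file.bin"), // placeholder key
		Body:   f,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded to", out.Location)
}
```

A larger PartSize keeps very large objects under the 10,000-part cap at the cost of more memory per in-flight part.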

@@ -1279,6 +1279,12 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
+// No permissions are required for users to perform this operation. The purpose
+// of the sts:GetSessionToken operation is to authenticate the user using MFA.
+// You cannot use policies to control authentication operations. For more information,
+// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
+// in the IAM User Guide.
+//
 // Session Duration
 //
 // The GetSessionToken operation must be called by using the long-term Amazon
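
The added GetSessionToken documentation stresses that the call exists to authenticate a user with MFA and that no IAM permissions are required for it. A minimal sketch of requesting temporary credentials with this SDK; the MFA serial number and token code are placeholders, and the call has to be made with long-term IAM user credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Uses whatever long-term credentials the default chain resolves.
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		// Placeholders: the MFA device ARN and its current one-time code.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		TokenCode:    aws.String("123456"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary credentials expire at", out.Credentials.Expiration)
}
```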

@@ -253,7 +253,7 @@ func getStatFileContentUint64(filePath string) uint64 {
 
 	res, err := parseUint(trimmed, 10, 64)
 	if err != nil {
-		logrus.Errorf("Unable to parse %q as a uint from Cgroup file %q", string(contents), filePath)
+		logrus.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), filePath)
 		return res
 	}
 

@@ -132,7 +132,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
 	for _, fileToRemove := range filesToRemove {
 		if rm, _ := fileutils.Matches(fileToRemove, excludes); rm {
 			if err := c.Remove(fileToRemove); err != nil {
-				logrus.Errorf("Failed to remove %s: %v", fileToRemove, err)
+				logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
 			}
 		}
 	}

@@ -681,7 +681,7 @@ func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
 	if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
-				logrus.Warnf("Error closing stdin: %+v", err)
+				logrus.Warnf("error closing stdin: %+v", err)
 			}
 		}
 	}

@@ -1,4 +1,3 @@
 //go:build !windows
-// +build !windows
 
 package container // import "github.com/docker/docker/container"

@@ -145,7 +144,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st
 
 	defer func() {
 		if err := v.Unmount(id); err != nil {
-			logrus.Warnf("Error while unmounting volume %s: %v", v.Name(), err)
+			logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
 		}
 	}()
 	if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {

@@ -63,8 +63,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 	// Connect stdin of container to the attach stdin stream.
 	if cfg.Stdin != nil {
 		group.Go(func() error {
-			logrus.Debug("Attach: stdin: begin")
-			defer logrus.Debug("Attach: stdin: end")
+			logrus.Debug("attach: stdin: begin")
+			defer logrus.Debug("attach: stdin: end")
 
 			defer func() {
 				if cfg.CloseStdin && !cfg.TTY {

@@ -98,8 +98,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 	}
 
 	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error {
-		logrus.Debugf("Attach: %s: begin", name)
-		defer logrus.Debugf("Attach: %s: end", name)
+		logrus.Debugf("attach: %s: begin", name)
+		defer logrus.Debugf("attach: %s: end", name)
 		defer func() {
 			// Make sure stdin gets closed
 			if cfg.Stdin != nil {

@@ -132,7 +132,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
 
 	errs := make(chan error, 1)
 	go func() {
-		defer logrus.Debug("Attach done")
+		defer logrus.Debug("attach done")
 		groupErr := make(chan error, 1)
 		go func() {
 			groupErr <- group.Wait()

@@ -122,7 +122,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
 		c.wg.Add(1)
 		go func() {
 			if _, err := pools.Copy(w, r); err != nil {
-				logrus.Errorf("Stream copy error: %v", err)
+				logrus.Errorf("stream copy error: %v", err)
 			}
 			r.Close()
 			c.wg.Done()

@@ -141,7 +141,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
 		go func() {
 			pools.Copy(iop.Stdin, stdin)
 			if err := iop.Stdin.Close(); err != nil {
-				logrus.Warnf("Failed to close stdin: %v", err)
+				logrus.Warnf("failed to close stdin: %v", err)
 			}
 		}()
 	}

@@ -382,7 +382,7 @@ func (v *memdbView) transform(container *Container) *Snapshot {
 	for port, bindings := range container.NetworkSettings.Ports {
 		p, err := nat.ParsePort(port.Port())
 		if err != nil {
-			logrus.Warnf("Invalid port map %+v", err)
+			logrus.Warnf("invalid port map %+v", err)
 			continue
 		}
 		if len(bindings) == 0 {

@@ -395,7 +395,7 @@ func (v *memdbView) transform(container *Container) *Snapshot {
 		for _, binding := range bindings {
 			h, err := nat.ParsePort(binding.HostPort)
 			if err != nil {
-				logrus.Warnf("Invalid host port map %+v", err)
+				logrus.Warnf("invalid host port map %+v", err)
 				continue
 			}
 			snapshot.Ports = append(snapshot.Ports, types.Port{

@@ -71,7 +71,7 @@ func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
 	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
 		if iop.Stdin != nil {
 			if err := iop.Stdin.Close(); err != nil {
-				logrus.Errorf("Error closing exec stdin: %+v", err)
+				logrus.Errorf("error closing exec stdin: %+v", err)
 			}
 		}
 	}

@@ -293,7 +293,7 @@ func IsInitialized(driverHome string) bool {
 		return false
 	}
 	if err != nil {
-		logrus.Warnf("Graphdriver.IsInitialized: stat failed: %v", err)
+		logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err)
 	}
 	return !isEmptyDir(driverHome)
 }

@@ -85,7 +85,7 @@ func (fl *follow) waitRead() error {
 		}
 		return errRetry
 	case err := <-fl.fileWatcher.Errors():
-		logrus.Debugf("Logger got error watching file: %v", err)
+		logrus.Debugf("logger got error watching file: %v", err)
 		// Something happened, let's try and stay alive and create a new watcher
 		if fl.retries <= 5 {
 			fl.fileWatcher.Close()

@@ -76,7 +76,7 @@ func (s *fs) Walk(f DigestWalkFunc) error {
 	for _, v := range dir {
 		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
 		if err := dgst.Validate(); err != nil {
-			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
+			logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
 			continue
 		}
 		if err := f(dgst); err != nil {

@@ -69,19 +69,19 @@ func (is *store) restore() error {
 	err := is.fs.Walk(func(dgst digest.Digest) error {
 		img, err := is.Get(IDFromDigest(dgst))
 		if err != nil {
-			logrus.Errorf("Invalid image %v, %v", dgst, err)
+			logrus.Errorf("invalid image %v, %v", dgst, err)
 			return nil
 		}
 		var l layer.Layer
 		if chainID := img.RootFS.ChainID(); chainID != "" {
 			if !system.IsOSSupported(img.OperatingSystem()) {
-				logrus.Errorf("Not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem())
+				logrus.Errorf("not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem())
 				return nil
 			}
 			l, err = is.lss[img.OperatingSystem()].Get(chainID)
 			if err != nil {
 				if err == layer.ErrLayerDoesNotExist {
-					logrus.Errorf("Layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem())
+					logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem())
 					return nil
 				}
 				return err

@@ -244,7 +244,7 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) {
 	}
 
 	if err := is.digestSet.Remove(id.Digest()); err != nil {
-		logrus.Errorf("Error removing %s from digest set: %q", id, err)
+		logrus.Errorf("error removing %s from digest set: %q", id, err)
 	}
 	delete(is.images, id)
 	is.fs.Delete(id.Digest())

@@ -330,7 +330,7 @@ func (is *store) imagesMap(all bool) map[ID]*Image {
 		}
 		img, err := is.Get(id)
 		if err != nil {
-			logrus.Errorf("Invalid image access: %q, error: %q", id, err)
+			logrus.Errorf("invalid image access: %q, error: %q", id, err)
 			continue
 		}
 		images[id] = img

@@ -339,7 +339,7 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) {
 		}
 		cacheID := strings.TrimSpace(string(contentBytes))
 		if cacheID == "" {
-			logrus.Error("Invalid cache ID")
+			logrus.Error("invalid cache ID")
 			continue
 		}
 

@@ -764,9 +764,9 @@ func (ls *layerStore) Cleanup() error {
 	if err != nil {
 		logrus.Errorf("Cannot get orphan layers: %v", err)
 	}
-	logrus.Debugf("Found %v orphan layers", len(orphanLayers))
+	logrus.Debugf("found %v orphan layers", len(orphanLayers))
 	for _, orphan := range orphanLayers {
-		logrus.Debugf("Removing orphan layer, chain ID: %v , cache ID: %v", orphan.chainID, orphan.cacheID)
+		logrus.Debugf("removing orphan layer, chain ID: %v , cache ID: %v", orphan.chainID, orphan.cacheID)
 		err = ls.driver.Remove(orphan.cacheID)
 		if err != nil && !os.IsNotExist(err) {
 			logrus.WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer")

@@ -165,7 +165,7 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
 
 	unpigzPath, err := exec.LookPath("unpigz")
 	if err != nil {
-		logrus.Debugf("Unpigz binary not found, falling back to go gzip library")
+		logrus.Debugf("unpigz binary not found, falling back to go gzip library")
 		return gzip.NewReader(buf)
 	}
 

@@ -438,7 +438,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
 			logrus.Debugf("Can't close layer: %s", err)
 		}
 		if err := writer.Close(); err != nil {
-			logrus.Debugf("Failed close Changes writer: %s", err)
+			logrus.Debugf("failed close Changes writer: %s", err)
 		}
 	}()
 	return reader, nil

@@ -108,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er
 	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
 	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
 
-	logrus.Debugf("Copying %q from %q", sourceBase, sourceDir)
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
 	return TarWithOptions(sourceDir, opts)
 }
 

@@ -159,7 +159,7 @@ func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}
 		select {
 		case <-timer.C:
 		case <-chClose:
-			logrus.Debugf("Watch for %s closed", f.Name())
+			logrus.Debugf("watch for %s closed", f.Name())
 			return
 		}
 

@@ -1,4 +1,3 @@
 //go:build linux || freebsd || openbsd
-// +build linux freebsd openbsd
 
 // Package kernel provides helper function to get, parse and compare kernel

@@ -26,7 +25,7 @@ func GetKernelVersion() (*VersionInfo, error) {
 // the given version.
 func CheckKernelVersion(k, major, minor int) bool {
 	if v, err := GetKernelVersion(); err != nil {
-		logrus.Warnf("Error getting kernel version: %s", err)
+		logrus.Warnf("error getting kernel version: %s", err)
 	} else {
 		if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
 			return false

@@ -291,7 +291,7 @@ func (hooks *Hooks) MarshalJSON() ([]byte, error) {
 		case CommandHook:
 			serializableHooks = append(serializableHooks, chook)
 		default:
-			logrus.Warnf("Cannot serialize hook of type %T, skipping", hook)
+			logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
 		}
 	}
 

@@ -104,7 +104,7 @@ github.com/ProtonMail/go-crypto/openpgp/s2k
 # github.com/acomagu/bufpipe v1.0.3
 ## explicit; go 1.12
 github.com/acomagu/bufpipe
-# github.com/aws/aws-sdk-go v1.43.36
+# github.com/aws/aws-sdk-go v1.44.24
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn