use aws-sdk-go-v2 (#2550)

Signed-off-by: BobDu <i@bobdu.cc>
Bob Du 2023-06-08 05:54:30 +08:00 committed by GitHub
parent cce5d313b3
commit d54caaaa02
546 changed files with 94550 additions and 115355 deletions

go.mod (31 changed lines)

@ -5,7 +5,10 @@ go 1.17
require (
cloud.google.com/go/storage v1.30.1
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/aws/aws-sdk-go v1.44.253
github.com/aws/aws-sdk-go-v2 v1.18.0
github.com/aws/aws-sdk-go-v2/config v1.18.25
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.67
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589
github.com/containerd/cgroups v1.1.0 // indirect
@ -58,17 +61,21 @@ require (
github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect
github.com/acomagu/bufpipe v1.0.4 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.15.5 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 // indirect
github.com/aws/smithy-go v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.24 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cilium/ebpf v0.9.1 // indirect

go.sum (67 changed lines)

@ -98,49 +98,63 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/aws/aws-sdk-go v1.44.253 h1:iqDd0okcH4ShfFexz2zzf4VmeDFf6NOMm07pHnEb8iY=
github.com/aws/aws-sdk-go v1.44.253/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250=
github.com/aws/aws-sdk-go-v2 v1.14.0/go.mod h1:ZA3Y8V0LrlWj63MQAnRHgKf/5QB//LSZCPNWlWrNGLU=
github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
github.com/aws/aws-sdk-go-v2 v1.16.3/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU=
github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA=
github.com/aws/aws-sdk-go-v2/config v1.15.5 h1:P+xwhr6kabhxDTXTVH9YoHkqjLJ0wVVpIUHtFNr2hjU=
github.com/aws/aws-sdk-go-v2/config v1.15.5/go.mod h1:ZijHHh0xd/A+ZY53az0qzC5tT46kt4JVCePf2NX9Lk4=
github.com/aws/aws-sdk-go-v2/config v1.18.25 h1:JuYyZcnMPBiFqn87L2cRppo+rNwgah6YwD3VuyvaW6Q=
github.com/aws/aws-sdk-go-v2/config v1.18.25/go.mod h1:dZnYpD5wTW/dQF0rRNLVypB396zWCcPiBIvdvSWHEg4=
github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc=
github.com/aws/aws-sdk-go-v2/credentials v1.12.0 h1:4R/NqlcRFSkR0wxOhgHi+agGpbEr5qMCjn7VqUIJY+E=
github.com/aws/aws-sdk-go-v2/credentials v1.12.0/go.mod h1:9YWk7VW+eyKsoIL6/CljkTrNVWBSK9pkqOPUuijid4A=
github.com/aws/aws-sdk-go-v2/credentials v1.13.24 h1:PjiYyls3QdCrzqUN35jMWtUK1vqVZ+zLfdOa/UPFDp0=
github.com/aws/aws-sdk-go-v2/credentials v1.13.24/go.mod h1:jYPYi99wUOPIFi0rhiOvXeSEReVOzBqFNOX5bXYoG2o=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4 h1:FP8gquGeGHHdfY6G5llaMQDF+HAf20VKc8opRwmjf04=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.4/go.mod h1:u/s5/Z+ohUQOPXl00m2yJVyioWDECsbpXTQlaqSlufc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3 h1:jJPgroehGvjrde3XufFIJUZVK5A2L9a3KwSFgKy9n8w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.3/go.mod h1:4Q0UFP0YJf0NrsEuEYHpM9fTSEVnD16Z3uyEF7J9JGM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.67 h1:fI9/5BDEaAv/pv1VO1X1n3jfP9it+IGqWsCuuBQI8wM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.67/go.mod h1:zQClPRIwQZfJlZq6WZve+s4Tb4JW+3V6eS+4+KrYeP8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.5/go.mod h1:2hXc8ooJqF2nAznsbJQIn+7h851/bu8GVC80OVTTqf8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10 h1:uFWgo6mGJI1n17nbcvSc6fxVuR3xLNqvXt12JCnEcT8=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.10/go.mod h1:F+EZtuIwjlv35kRJPyBGcsA4f7bnSoz15zOQ2lJq1Z4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33 h1:kG5eQilShqmJbv11XL1VpyDbaEJzWxd4zRiCG30GSn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.3.0/go.mod h1:miRSv9l093jX/t/j+mBCaLqFHo9xKYzJ7DGm1BsGoJM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4 h1:cnsvEKSoHN4oAN7spMMr0zhEW2MHnhAVpmqQg8E6UcM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.4/go.mod h1:8glyUqVIM4AmeenIsPo0oVh3+NUwnsQml2OFupfQW+0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27 h1:vFQlirhuM8lLlpI7imKOMsjdQLuN9CPi+k44F/OFVsk=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11 h1:6cZRymlLEIlDTEB0+5+An6Zj1CKt6rSE69tOmFeu1nk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.11/go.mod h1:0MR+sS1b/yxsfAPvAESrw8NfwUoxMinDyw6EYR9BS2U=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34 h1:gGLG7yKaXG02/jBlg210R7VgQIotiQntNhsCFejawx8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 h1:AzwRi5OKKwo4QNqPf7TjeO+tK8AyOK3GVSwmRPo7/Cs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25/go.mod h1:SUbB4wcbSEyCvqBxv/O/IBf93RbEze7U7OnoTlpPB+g=
github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0=
github.com/aws/aws-sdk-go-v2/service/ecr v1.15.0 h1:lY2Z2sBP+zSbJ6CvvmnFgPcgknoQ0OJV88AwVetRRFk=
github.com/aws/aws-sdk-go-v2/service/ecr v1.15.0/go.mod h1:4zYI85WiYDhFaU1jPFVfkD7HlBcdnITDE3QxDwy4Kus=
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.4.1/go.mod h1:eD5Eo4drVP2FLTw0G+SMIPWNWvQRGGTtIZR2XeAagoA=
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0 h1:LsqBpyRofMG6eDs6YGud6FhdGyIyXelAasPOZ6wWLro=
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0/go.mod h1:IArQ3IBR00FkuraKwudKZZU32OxJfdTdwV+W5iZh3Y4=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28 h1:vGWm5vTpMr39tEZfQeDiDAMgk+5qsnvRny3FjLpnH5w=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.28/go.mod h1:spfrICMD6wCAhjhzHuy6DOZZ+LAIY10UxhUmLzpJTTs=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zceowr5Z1Nh2WVP8bf/3ikB41IZW59E4yIYbg+pC6mw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4 h1:b16QW0XWl0jWjLABFc1A+uh145Oqv+xDcObNk0iQgUk=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.4/go.mod h1:uKkN7qmSIsNJVyMtxNQoCEYMvFEXbOg9fwCJPdfp2u8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27 h1:0iKliEXAcCa2qVtRs7Ot5hItA2MsufrphbRFlz1Owxo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.27/go.mod h1:EOwBD4J4S5qYszS5/3DpkejfuK+Z5/1uzICfPaZLtqw=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2 h1:NbWkRxEEIRSCqxhsHQuMiTH7yo+JZW1gp8v3elSVMTQ=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.2/go.mod h1:4tfW5l4IAB32VWCDEBxCRtR9T4BWy4I4kr1spr8NgZM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1 h1:O+9nAy9Bb6bJFTpeNFtd9UfHbgxO1o4ZDAM9rQp5NsY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.1/go.mod h1:J9kLNzEiHSeGMyN7238EjJmBpCniVzFda75Gxl/NqB8=
github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHDnzZP/sKbhDWV2T1EOzFIM=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 h1:Uw5wBybFQ1UeA9ts0Y07gbv0ncZnIAyw858tDW0NP2o=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.4/go.mod h1:cPDwJwsP4Kff9mldCXAmddjJL6JGQqtA3Mzer2zyr88=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10 h1:UBQjaMTCKwyUYwiVnUt6toEJwGXsLBI6al083tpjJzY=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.10/go.mod h1:ouy2P4z6sJN70fR3ka3wD3Ro3KezSxU6eKGQI2+2fjI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10 h1:PkHIIJs8qvq0e5QybnZoG1K/9QTrLr9OsqCIo59jOBA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.10/go.mod h1:AFvkxc8xfBe8XA+5St5XIHHrQQtkxqrRincx4hmMHOk=
github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 h1:+xtV90n3abQmgzk1pS++FdxZTrPEDgQng6e4/56WR2A=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.4/go.mod h1:lfSYenAXtavyX2A1LsViglqlG9eEFYxNryTZS5rn3QE=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0 h1:2DQLAKDteoEDI8zpCzqBMaZlJuoE9iTYD0gFmXVax9E=
github.com/aws/aws-sdk-go-v2/service/sts v1.19.0/go.mod h1:BgQOMsg8av8jset59jelyPW7NoZcZXLVpDsXunGDrk8=
github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.11.0/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 h1:IWeCJzU+IYaO2rVEBlGPTBfe90cmGXFTLdhUFlzDGsY=
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795/go.mod h1:8vJsEZ4iRqG+Vx6pKhWK6U00qcj0KC37IsfszMkY6UE=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -319,6 +333,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.15.1 h1:RsJ9NbfxYWF8Wl4VmvkpN3zYATwuvlPq2j20zmcs63E=
@ -683,7 +698,6 @@ golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
@ -781,7 +795,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -789,7 +802,6 @@ golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
@ -802,7 +814,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=


@ -17,6 +17,7 @@ limitations under the License.
package buildcontext
import (
"context"
"fmt"
"os"
"path/filepath"
@ -26,10 +27,10 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// S3 unifies calls to download and unpack the build context.
@ -44,25 +45,31 @@ func (s *S3) UnpackTarFromBuildContext() (string, error) {
return "", fmt.Errorf("getting bucketname and filepath from context: %w", err)
}
option := session.Options{
SharedConfigState: session.SharedConfigEnable,
}
endpoint := os.Getenv(constants.S3EndpointEnv)
forcePath := false
if strings.ToLower(os.Getenv(constants.S3ForcePathStyle)) == "true" {
forcePath = true
}
if endpoint != "" {
option.Config = aws.Config{
Endpoint: aws.String(endpoint),
S3ForcePathStyle: aws.Bool(forcePath),
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
if endpoint != "" {
return aws.Endpoint{
URL: endpoint,
}, nil
}
}
sess, err := session.NewSessionWithOptions(option)
return aws.Endpoint{}, &aws.EndpointNotFoundError{}
})
cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithEndpointResolverWithOptions(customResolver))
if err != nil {
return bucket, err
}
downloader := s3manager.NewDownloader(sess)
client := s3.NewFromConfig(cfg, func(options *s3.Options) {
if endpoint != "" {
options.UsePathStyle = forcePath
}
})
downloader := s3manager.NewDownloader(client)
directory := kConfig.BuildContextDir
tarPath := filepath.Join(directory, constants.ContextTar)
if err := os.MkdirAll(directory, 0750); err != nil {
@ -72,7 +79,7 @@ func (s *S3) UnpackTarFromBuildContext() (string, error) {
if err != nil {
return directory, err
}
_, err = downloader.Download(file,
_, err = downloader.Download(context.TODO(), file,
&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(item),
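
Taken together, the kaniko change above swaps the v1 session/s3manager pair for config.LoadDefaultConfig, an optional custom endpoint resolver, path-style addressing on the client, and a context-aware downloader. A minimal, self-contained sketch of that flow follows; the env var name, bucket, key, and output path are placeholders, not kaniko's actual constants:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()

	// Optional custom endpoint, e.g. an S3-compatible store (placeholder env var name).
	endpoint := os.Getenv("S3_ENDPOINT")

	// Resolve the custom endpoint when set; otherwise fall back to the SDK defaults.
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if endpoint != "" {
				return aws.Endpoint{URL: endpoint}, nil
			}
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	cfg, err := config.LoadDefaultConfig(ctx, config.WithEndpointResolverWithOptions(resolver))
	if err != nil {
		log.Fatal(err)
	}

	// Path-style addressing is usually required for custom endpoints.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = endpoint != ""
	})

	out, err := os.Create("/tmp/context.tar.gz") // placeholder destination
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// The v2 downloader takes a context and an S3 client instead of a v1 session.
	downloader := s3manager.NewDownloader(client)
	if _, err := downloader.Download(ctx, out, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),      // placeholder
		Key:    aws.String("context.tar.gz"), // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}
```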


@ -9,3 +9,6 @@ Gemfile.lock
/private/model/cli/gen-api/gen-api
.gradle/
build/
.idea/
bin/
.vscode/


@ -6,7 +6,7 @@ modules-download-mode = "readonly"
allow-parallel-runners = true
skip-dirs = ["internal/repotools"]
skip-dirs-use-default = true
skip-files = ["service/transcribestreaming/eventstream_test.go"]
[output]
format = "github-actions"

File diff suppressed because it is too large.

@ -1,4 +1,4 @@
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.


@ -14,29 +14,28 @@ Jump To:
* [Feature Requests](#feature-requests)
* [Code Contributions](#code-contributions)
## How to contribute
*Before you send us a pull request, please be sure that:*
1. You're working from the latest source on the master branch.
1. You're working from the latest source on the `main` branch.
2. You check existing open, and recently closed, pull requests to be sure
that someone else hasn't already addressed the problem.
3. You create an issue before working on a contribution that will take a
significant amount of your time.
*Creating a Pull Request*
1. Fork the repository.
2. In your fork, make your change in a branch that's based on this repo's master branch.
2. In your fork, make your change in a branch that's based on this repo's `main` branch.
3. Commit the change to your fork, using a clear and descriptive commit message.
4. Create a pull request, answering any questions in the pull request form.
For contributions that will take a significant amount of time, open a new
issue to pitch your idea before you get started. Explain the problem and
describe the content you want to see added to the documentation. Let us know
if you'll write it yourself or if you'd like us to help. We'll discuss your
proposal with you and let you know whether we're likely to accept it.
## Bug Reports
@ -74,9 +73,9 @@ guidelines prior to filing a bug report.
Open an [issue][issues] with the following:
* A short, descriptive title. Ideally, other community members should be able
to get a good idea of the feature just from reading the title.
* A detailed description of the proposed feature.
* Why it should be added to the SDK.
* If possible, example code to illustrate how it should work.
* Use Markdown to make the request easier to read;
@ -97,7 +96,7 @@ Please be aware of the following notes prior to opening a pull request:
3. Wherever possible, pull requests should contain tests as appropriate.
Bugfixes should contain tests that exercise the corrected behavior (i.e., the
test should fail without the bugfix and pass with it), and new features
should be accompanied by tests exercising the feature.
4. Pull requests that contain failing tests will not be merged until the test
@ -112,7 +111,7 @@ Please be aware of the following notes prior to opening a pull request:
### Testing
To run the tests locally, running the `make unit` command will `go get` the
SDK's testing dependencies, and run vet, link and unit tests for the SDK.
```
@ -129,7 +128,7 @@ go test -tags codegen ./private/...
See the `Makefile` for additional testing tags that can be used in testing.
To test on multiple platforms the SDK includes several DockerFiles under the
`awstesting/sandbox` folder, and associated make recipes to execute
unit testing within environments configured for specific Go versions.
@ -170,9 +169,9 @@ This will result in a patch version change.
* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
patch version change.
[issues]: https://github.com/aws/aws-sdk-go/issues
[pr]: https://github.com/aws/aws-sdk-go/pulls
[issues]: https://github.com/aws/aws-sdk-go-v2/issues
[pr]: https://github.com/aws/aws-sdk-go-v2/pulls
[license]: http://aws.amazon.com/apache2.0/
[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
[releasenotes]: https://github.com/aws/aws-sdk-go/releases
[releasenotes]: https://github.com/aws/aws-sdk-go-v2/releases


@ -12,4 +12,4 @@ Past Discussions
---
The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions.
[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md
[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/main/CODE_OF_CONDUCT.md


@ -72,22 +72,22 @@ all: generate unit
# Code Generation #
###################
.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \
gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy gen-aws-ptrs tidy-modules-% \
gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \
add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \
sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \
update-module-metadata download-modules-%
generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \
gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy min-go-version-. \
gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \
tidy-modules-. add-module-license-files gen-aws-ptrs format
smithy-generate:
cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean
smithy-build: gen-repo-mod-replace
smithy-build:
cd codegen && ./gradlew clean build -Plog-tests
smithy-build-%: gen-repo-mod-replace
smithy-build-%:
@# smithy-build- command that uses the pattern to define build filter that
@# the smithy API model service id starts with. Strips off the
@# "smithy-build-".
@ -120,19 +120,32 @@ gen-config-asserts:
gen-internal-codegen:
@echo "Generating internal/codegen"
cd internal/codegen \
&& go mod tidy \
&& go generate
gen-repo-mod-replace:
@echo "Generating go.mod replace for repo modules"
go run ${REPOTOOLS_CMD_MAKE_RELATIVE}
gen-mod-replace-smithy:
gen-mod-replace-smithy-%:
@# gen-mod-replace-smithy- command that uses the pattern to define build filter that
@# for modules to add replace to. Strips off the "gen-mod-replace-smithy-".
@#
@# SMITHY_GO_SRC environment variable is the path to add replace to
@#
@# e.g. gen-mod-replace-smithy-service_ssooidc
cd ./internal/repotools/cmd/eachmodule \
&& go run . "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}"
&& go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
"go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}"
gen-mod-dropreplace-smithy:
gen-mod-dropreplace-smithy-%:
@# gen-mod-dropreplace-smithy- command that uses the pattern to define build filter that
@# for modules to add replace to. Strips off the "gen-mod-dropreplace-smithy-".
@#
@# e.g. gen-mod-dropreplace-smithy-service_ssooidc
cd ./internal/repotools/cmd/eachmodule \
&& go run . "go mod edit -dropreplace github.com/aws/smithy-go"
&& go run . -p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
"go mod edit -dropreplace github.com/aws/smithy-go"
gen-aws-ptrs:
cd aws && go generate
@ -233,7 +246,6 @@ unit-race-modules-%:
"go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
"go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
unit-modules-%:
@# unit command that uses the pattern to define the root path that the
@# module testing will start from. Strips off the "unit-modules-" and
@ -395,7 +407,6 @@ bench-modules-%:
&& go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \
"go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..."
#####################
# Release Process #
#####################
@ -486,14 +497,22 @@ list-deps-%:
###################
.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip
sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip
sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-go1.18 sandbox-test-go1.19 sandbox-test-go1.20 sandbox-test-gotip
sandbox-build-%:
@# sandbox-build-go1.17
@# sandbox-build-gotip
docker build \
-f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" .
@if [ $@ == sandbox-build-gotip ]; then\
docker build \
-f ./internal/awstesting/sandbox/Dockerfile.test.gotip \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\
else\
docker build \
--build-arg GO_VERSION=$(subst sandbox-build-go,,$@) \
-f ./internal/awstesting/sandbox/Dockerfile.test.goversion \
-t "aws-sdk-go-$(subst sandbox-build-,,$@)" . ;\
fi
sandbox-run-%: sandbox-build-%
@# sandbox-run-go1.17
@# sandbox-run-gotip


@ -1,3 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.


@ -1,13 +1,12 @@
# AWS SDK for Go v2
[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
[![Go Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[![Codegen Build status](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [![SDK Documentation](https://img.shields.io/badge/SDK-Documentation-blue)](https://aws.github.io/aws-sdk-go-v2/docs/) [![Migration Guide](https://img.shields.io/badge/Migration-Guide-blue)](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [![API Reference](https://img.shields.io/badge/api-reference-blue.svg)](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [![Apache V2 License](https://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go-v2/blob/main/LICENSE.txt)
`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language.
The v2 SDK requires a minimum version of `Go 1.15`.
Checkout out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug
Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug
fixes, updates, and features added to the SDK.
Jump To:
@ -87,7 +86,7 @@ func main() {
###### Compile and Execute
```sh
$ go run .
Table:
Tables:
tableOne
tableTwo
```
@ -97,9 +96,9 @@ tableTwo
Please use these community resources for getting help. We use the GitHub issues
for tracking bugs and feature requests.
* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag.
* Ask us a [question](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=q-a) or open a [discussion](https://github.com/aws/aws-sdk-go-v2/discussions/new?category=general).
* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose).
* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/).
@ -107,7 +106,7 @@ This SDK implements AWS service APIs. For general issues regarding the AWS servi
If you encounter a bug with the AWS SDK for Go we would like to hear about it.
Search the [existing issues][Issues] and see
if others are also experiencing the issue before opening a new issue. Please
if others are also experiencing the same issue before opening a new issue. Please
include the version of AWS SDK for Go, Go language, and OS you're using. Please
also include reproduction case when appropriate.
@ -118,7 +117,7 @@ Keeping the list of open issues lean will help us respond in a timely manner.
## Feedback and contributing
The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
@ -129,6 +128,8 @@ The v2 SDK will use GitHub [Issues] to track feature requests and issues with th
[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and
use the AWS SDK for Go V2.
[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go.
[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this
document to look up all API operation input and output parameters for AWS
services supported by the SDK. The API reference also includes documentation of
@ -137,8 +138,8 @@ API operation require parameters.
[Service Documentation](https://aws.amazon.com/documentation/) - Use this
documentation to learn how to interface with AWS services. These guides are
great for getting started with a service, or when looking for more
information about a service. While this document is not required for coding,
services may supply helpful samples to look out for.
[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
@ -149,7 +150,7 @@ services may supply helpful samples to look out for.
[Dep]: https://github.com/golang/dep
[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md
[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md
[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md
[design]: https://github.com/aws/aws-sdk-go-v2/blob/main/DESIGN.md
[license]: http://aws.amazon.com/apache2.0/


@ -75,9 +75,8 @@ func Parse(arn string) (ARN, error) {
}, nil
}
// IsARN returns whether the given string is an ARN by looking for
// whether the string starts with "arn:" and contains the correct number
// of sections delimited by colons(:).
// IsARN returns whether the given string is an arn
// by looking for whether the string starts with arn:
func IsARN(arn string) bool {
return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1
}


@ -3,13 +3,14 @@ package aws
import (
"net/http"
smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
// HTTPClient provides the interface to provide custom HTTPClients. Generally
// *http.Client is sufficient for most use cases. The HTTPClient should not
// follow redirects.
// follow 301 or 302 redirects.
type HTTPClient interface {
Do(*http.Request) (*http.Response, error)
}
@ -25,11 +26,23 @@ type Config struct {
// information on AWS regions.
Region string
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
// The credentials object to use when signing requests.
// Use the LoadDefaultConfig to load configuration from all the SDK's supported
// sources, and resolve credentials using the SDK's default credential chain.
Credentials CredentialsProvider
// The Bearer Authentication token provider to use for authenticating API
// operation calls with a Bearer Authentication token. The API clients and
// operation must support Bearer Authentication scheme in order for the
// token provider to be used. API clients created with NewFromConfig will
// automatically be configured with this option, if the API client support
// Bearer Authentication.
//
// The SDK's config.LoadDefaultConfig can automatically populate this
// option for external configuration options such as SSO session.
// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
BearerAuthTokenProvider smithybearer.TokenProvider
// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
// The SDK defaults to a BuildableClient allowing API clients to create
// copies of the HTTP Client for service specific customizations.


@ -46,14 +46,14 @@ type CredentialsCacheOptions struct {
// CredentialsCache will look for optional interfaces on the Provider to adjust
// how the credential cache handles credentials caching.
//
// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
// credential refresh failures. This could return an updated Credentials
// value, or attempt another means of retrieving credentials.
// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle
// credential refresh failures. This could return an updated Credentials
// value, or attempt another means of retrieving credentials.
//
// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
// credentials Expires is modified. This could modify how the Credentials
// Expires is adjusted based on the CredentialsCache ExpiryWindow option.
// Such as providing a floor not to reduce the Expires below.
// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how
// credentials Expires is modified. This could modify how the Credentials
// Expires is adjusted based on the CredentialsCache ExpiryWindow option.
// Such as providing a floor not to reduce the Expires below.
type CredentialsCache struct {
provider CredentialsProvider
@ -178,6 +178,12 @@ func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil))
}
// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache
// matches the target provider type.
func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
return IsCredentialsProvider(p.provider, target)
}
// HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache to allow CredentialsProvider how failed to refresh
// credentials is handled.


@ -3,6 +3,7 @@ package aws
import (
"context"
"fmt"
"reflect"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
@ -23,41 +24,41 @@ import (
// The following example demonstrates using the AnonymousCredentials to prevent
// SDK's external config loading attempt to resolve credentials.
//
// cfg, err := config.LoadDefaultConfig(context.TODO(),
// config.WithCredentialsProvider(aws.AnonymousCredentials{}),
// )
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(cfg)
//
// Alternatively you can leave the API client Option's `Credential` member to
// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
// the `Credentials` member to nil, if the external config resolved a
// credential provider.
//
// client := s3.New(s3.Options{
// // Credentials defaults to a nil value.
// })
//
// This can also be configured for specific operations calls too.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// log.Fatalf("failed to load config, %v", err)
// }
//
// client := s3.NewFromConfig(config)
//
// result, err := client.GetObject(context.TODO(), s3.GetObject{
// Bucket: aws.String("example-bucket"),
// Key: aws.String("example-key"),
// }, func(o *s3.Options) {
// o.Credentials = nil
// // Or
// o.Credentials = aws.AnonymousCredentials{}
// })
type AnonymousCredentials struct{}
// Retrieve implements the CredentialsProvider interface, but will always
@ -129,3 +130,41 @@ type CredentialsProviderFunc func(context.Context) (Credentials, error)
func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
return fn(ctx)
}
type isCredentialsProvider interface {
IsCredentialsProvider(CredentialsProvider) bool
}
// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the
// implementation type.
//
// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating
// whether target matches the credential provider type.
//
// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used:
//
// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
// If provider is of type *T and target is of type *V,true if type *T is the same as type *V, otherwise false
func IsCredentialsProvider(provider, target CredentialsProvider) bool {
if target == nil || provider == nil {
return provider == target
}
if x, ok := provider.(isCredentialsProvider); ok {
return x.IsCredentialsProvider(target)
}
targetType := reflect.TypeOf(target)
if targetType.Kind() != reflect.Ptr {
targetType = reflect.PtrTo(targetType)
}
providerType := reflect.TypeOf(provider)
if providerType.Kind() != reflect.Ptr {
providerType = reflect.PtrTo(providerType)
}
return targetType.AssignableTo(providerType)
}
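
A short usage sketch of the comparison rules documented above; the static provider and cache values here are illustrative only, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	static := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
	cached := aws.NewCredentialsCache(static)

	// Value and pointer forms of the same implementation type compare as equal.
	fmt.Println(aws.IsCredentialsProvider(static, &credentials.StaticCredentialsProvider{})) // true

	// CredentialsCache delegates through its own IsCredentialsProvider method
	// to the provider it wraps.
	fmt.Println(aws.IsCredentialsProvider(cached, static))                     // true
	fmt.Println(aws.IsCredentialsProvider(cached, aws.AnonymousCredentials{})) // false
}
```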


@ -1,7 +1,7 @@
// Package aws provides the core SDK's utilities and shared types. Use this package's
// utilities to simplify setting and reading API operations parameters.
//
// Value and Pointer Conversion Utilities
// # Value and Pointer Conversion Utilities
//
// This package includes a helper conversion utility for each scalar type the SDK's
// API use. These utilities make getting a pointer of the scalar, and dereferencing
@ -16,33 +16,33 @@
// to get pointer of a literal string value, because getting the address of a
// literal requires assigning the value to a variable first.
//
// var strPtr *string
//
// // Without the SDK's conversion functions
// str := "my string"
// strPtr = &str
//
// // With the SDK's conversion functions
// strPtr = aws.String("my string")
//
// // Convert *string to string value
// str = aws.ToString(strPtr)
//
// In addition to scalars the aws package also includes conversion utilities for
// map and slice for commonly types used in API parameters. The map and slice
// conversion functions use similar naming pattern as the scalar conversion
// functions.
//
// var strPtrs []*string
// var strs []string = []string{"Go", "Gophers", "Go"}
//
// // Convert []string to []*string
// strPtrs = aws.StringSlice(strs)
//
// // Convert []*string to []string
// strs = aws.ToStringSlice(strPtrs)
//
// SDK Default HTTP Client
// # SDK Default HTTP Client
//
// The SDK will use the http.DefaultClient if a HTTP client is not provided to
// the SDK's Session, or service client constructor. This means that if the


@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.3"
const goModuleVersion = "1.18.0"


@ -7,10 +7,12 @@ package aws
// The entire 64-bit group is reserved for later expansion by the SDK.
//
// Example: Setting ClientLogMode to enable logging of retries and requests
//
// clientLogMode := aws.LogRetries | aws.LogRequest
//
// Example: Adding an additional log mode to an existing ClientLogMode value
//
// clientLogMode |= aws.LogResponse
type ClientLogMode uint64
// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events.


@ -0,0 +1,94 @@
package middleware
import (
"context"
"fmt"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"os"
)
const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME"
const envAmznTraceID = "_X_AMZN_TRACE_ID"
const amznTraceIDHeader = "X-Amzn-Trace-Id"
// AddRecursionDetection adds recursionDetection to the middleware stack
func AddRecursionDetection(stack *middleware.Stack) error {
return stack.Build.Add(&RecursionDetection{}, middleware.After)
}
// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent
// to avoid recursion invocation in Lambda
type RecursionDetection struct{}
// ID returns the middleware identifier
func (m *RecursionDetection) ID() string {
return "RecursionDetection"
}
// HandleBuild detects Lambda environment and adds its trace ID to request header if absent
func (m *RecursionDetection) HandleBuild(
ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown request type %T", req)
}
_, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName)
xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID)
value := req.Header.Get(amznTraceIDHeader)
// only set the X-Amzn-Trace-Id header when it is not set initially, the
// current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists
if value != "" || !hasLambdaEnv || !hasTraceID {
return next.HandleBuild(ctx, in)
}
req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID))
return next.HandleBuild(ctx, in)
}
func percentEncode(s string) string {
upperhex := "0123456789ABCDEF"
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEncode(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
required := len(s) + 2*hexCount
t := make([]byte, required)
j := 0
for i := 0; i < len(s); i++ {
if c := s[i]; shouldEncode(c) {
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
} else {
t[j] = c
j++
}
}
return string(t)
}
func shouldEncode(c byte) bool {
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',':
return false
default:
return true
}
}
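
As an illustration of how this build-step middleware plugs into a smithy-go stack, the helper can be registered on a bare stack as below; the stack setup is an assumption for the example, not how the SDK wires it internally:

```go
package main

import (
	"fmt"
	"log"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// Build an HTTP request stack the way a generated client would.
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// Register the recursion-detection build step.
	if err := awsmiddleware.AddRecursionDetection(stack); err != nil {
		log.Fatal(err)
	}

	// The Build step now lists the "RecursionDetection" middleware ID.
	fmt.Println(stack.Build.List())
}
```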


@ -68,10 +68,12 @@ type requestUserAgent struct {
// request.
//
// User-Agent example:
//
// aws-sdk-go-v2/1.2.3
//
// X-Amz-User-Agent example:
//
// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
func newRequestUserAgent() *requestUserAgent {
userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
addProductName(userAgent)


@ -0,0 +1,62 @@
# v1.4.10 (2022-12-02)
* No change notes available for this release.
# v1.4.9 (2022-10-24)
* No change notes available for this release.
# v1.4.8 (2022-09-14)
* No change notes available for this release.
# v1.4.7 (2022-09-02)
* No change notes available for this release.
# v1.4.6 (2022-08-31)
* No change notes available for this release.
# v1.4.5 (2022-08-29)
* No change notes available for this release.
# v1.4.4 (2022-08-09)
* No change notes available for this release.
# v1.4.3 (2022-06-29)
* No change notes available for this release.
# v1.4.2 (2022-06-07)
* No change notes available for this release.
# v1.4.1 (2022-03-24)
* No change notes available for this release.
# v1.4.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.3.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.2.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.1.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.0.0 (2021-11-06)
* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.
* **Release**: Protocol support has been added for AWS event stream.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version


@ -6,63 +6,62 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/aws/smithy-go/logging"
"hash"
"hash/crc32"
"io"
"github.com/aws/aws-sdk-go/aws"
)
// DecoderOptions is the Decoder configuration options.
type DecoderOptions struct {
Logger logging.Logger
LogMessages bool
}
// Decoder provides decoding of an Event Stream messages.
type Decoder struct {
r io.Reader
logger aws.Logger
options DecoderOptions
}
// NewDecoder initializes and returns a Decoder for decoding event
// stream messages from the reader provided.
func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder {
d := &Decoder{
r: r,
func NewDecoder(optFns ...func(*DecoderOptions)) *Decoder {
options := DecoderOptions{}
for _, fn := range optFns {
fn(&options)
}
for _, opt := range opts {
opt(d)
}
return d
}
// DecodeWithLogger adds a logger to be used by the decoder when decoding
// stream events.
func DecodeWithLogger(logger aws.Logger) func(*Decoder) {
return func(d *Decoder) {
d.logger = logger
return &Decoder{
options: options,
}
}
// Decode attempts to decode a single message from the event stream reader.
// Will return the event stream message, or error if Decode fails to read
// Will return the event stream message, or error if decodeMessage fails to read
// the message from the stream.
func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) {
reader := d.r
if d.logger != nil {
//
// payloadBuf is a byte slice that will be used in the returned Message.Payload. Callers
// must ensure that the Message.Payload from a previous decode has been consumed before passing in the same underlying
// payloadBuf byte slice.
func (d *Decoder) Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
if d.options.Logger != nil && d.options.LogMessages {
debugMsgBuf := bytes.NewBuffer(nil)
reader = io.TeeReader(reader, debugMsgBuf)
defer func() {
logMessageDecode(d.logger, debugMsgBuf, m, err)
logMessageDecode(d.options.Logger, debugMsgBuf, m, err)
}()
}
m, err = Decode(reader, payloadBuf)
m, err = decodeMessage(reader, payloadBuf)
return m, err
}
// Decode attempts to decode a single message from the event stream reader.
// Will return the event stream message, or error if Decode fails to read
// decodeMessage attempts to decode a single message from the event stream reader.
// Will return the event stream message, or error if decodeMessage fails to read
// the message from the reader.
func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
func decodeMessage(reader io.Reader, payloadBuf []byte) (m Message, err error) {
crc := crc32.New(crc32IEEETable)
hashReader := io.TeeReader(reader, crc)
@ -95,15 +94,15 @@ func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) {
return m, nil
}
func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
func logMessageDecode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) {
w := bytes.NewBuffer(nil)
defer func() { logger.Log(w.String()) }()
defer func() { logger.Logf(logging.Debug, w.String()) }()
fmt.Fprintf(w, "Raw message:\n%s\n",
hex.Dump(msgBuf.Bytes()))
if decodeErr != nil {
fmt.Fprintf(w, "Decode error: %v\n", decodeErr)
fmt.Fprintf(w, "decodeMessage error: %v\n", decodeErr)
return
}
@ -167,13 +166,14 @@ func decodeUint8(r io.Reader) (uint8, error) {
if br, ok := r.(byteReader); ok {
v, err := br.ReadByte()
return uint8(v), err
return v, err
}
var b [1]byte
_, err := io.ReadFull(r, b[:])
return uint8(b[0]), err
return b[0], err
}
func decodeUint16(r io.Reader) (uint16, error) {
var b [2]byte
bs := b[:]
@ -183,6 +183,7 @@ func decodeUint16(r io.Reader) (uint16, error) {
}
return binary.BigEndian.Uint16(bs), nil
}
func decodeUint32(r io.Reader) (uint32, error) {
var b [4]byte
bs := b[:]
@ -192,6 +193,7 @@ func decodeUint32(r io.Reader) (uint32, error) {
}
return binary.BigEndian.Uint32(bs), nil
}
func decodeUint64(r io.Reader) (uint64, error) {
var b [8]byte
bs := b[:]


@ -6,55 +6,54 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/aws/smithy-go/logging"
"hash"
"hash/crc32"
"io"
"github.com/aws/aws-sdk-go/aws"
)
// EncoderOptions is the configuration options for Encoder.
type EncoderOptions struct {
Logger logging.Logger
LogMessages bool
}
// Encoder provides EventStream message encoding.
type Encoder struct {
w io.Writer
logger aws.Logger
options EncoderOptions
headersBuf *bytes.Buffer
messageBuf *bytes.Buffer
}
// NewEncoder initializes and returns an Encoder to encode Event Stream
// messages to an io.Writer.
func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder {
e := &Encoder{
w: w,
// messages.
func NewEncoder(optFns ...func(*EncoderOptions)) *Encoder {
o := EncoderOptions{}
for _, fn := range optFns {
fn(&o)
}
return &Encoder{
options: o,
headersBuf: bytes.NewBuffer(nil),
}
for _, opt := range opts {
opt(e)
}
return e
}
// EncodeWithLogger adds a logger to be used by the encode when decoding
// stream events.
func EncodeWithLogger(logger aws.Logger) func(*Encoder) {
return func(d *Encoder) {
d.logger = logger
messageBuf: bytes.NewBuffer(nil),
}
}
// Encode encodes a single EventStream message to the io.Writer the Encoder
// was created with. An error is returned if writing the message fails.
func (e *Encoder) Encode(msg Message) (err error) {
func (e *Encoder) Encode(w io.Writer, msg Message) (err error) {
e.headersBuf.Reset()
e.messageBuf.Reset()
writer := e.w
if e.logger != nil {
var writer io.Writer = e.messageBuf
if e.options.Logger != nil && e.options.LogMessages {
encodeMsgBuf := bytes.NewBuffer(nil)
writer = io.MultiWriter(writer, encodeMsgBuf)
defer func() {
logMessageEncode(e.logger, encodeMsgBuf, msg, err)
logMessageEncode(e.options.Logger, encodeMsgBuf, msg, err)
}()
}
@ -85,12 +84,18 @@ func (e *Encoder) Encode(msg Message) (err error) {
}
msgCRC := crc.Sum32()
return binary.Write(writer, binary.BigEndian, msgCRC)
if err := binary.Write(writer, binary.BigEndian, msgCRC); err != nil {
return err
}
_, err = io.Copy(w, e.messageBuf)
return err
}
func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
func logMessageEncode(logger logging.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) {
w := bytes.NewBuffer(nil)
defer func() { logger.Log(w.String()) }()
defer func() { logger.Logf(logging.Debug, w.String()) }()
fmt.Fprintf(w, "Message to encode:\n")
encoder := json.NewEncoder(w)
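
Net effect of the signature changes above: the Encoder and Decoder no longer capture an io.Writer/io.Reader at construction, and logging is configured through EncoderOptions/DecoderOptions. A rough round-trip sketch against the new signatures; the message contents are made up:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"
)

func main() {
	var buf bytes.Buffer

	// Encode now takes the destination writer per call.
	enc := eventstream.NewEncoder()
	msg := eventstream.Message{Payload: []byte(`{"hello":"world"}`)}
	if err := enc.Encode(&buf, msg); err != nil {
		log.Fatal(err)
	}

	// Decode likewise takes the reader per call; a nil payload buffer lets it allocate.
	dec := eventstream.NewDecoder()
	out, err := dec.Decode(&buf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out.Payload)
}
```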


@ -4,6 +4,7 @@ package eventstreamapi
const (
ChunkSignatureHeader = `:chunk-signature` // chunk signature for message
DateHeader = `:date` // Date header for signature
ContentTypeHeader = ":content-type" // message payload content-type
// Message header and values
MessageTypeHeader = `:message-type` // Identifies type of message.

View File

@ -0,0 +1,71 @@
package eventstreamapi
import (
"context"
"fmt"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
)
type eventStreamWriterKey struct{}
// GetInputStreamWriter returns the io.WriteCloser used to write to the operation's input event stream.
func GetInputStreamWriter(ctx context.Context) io.WriteCloser {
writeCloser, _ := middleware.GetStackValue(ctx, eventStreamWriterKey{}).(io.WriteCloser)
return writeCloser
}
func setInputStreamWriter(ctx context.Context, writeCloser io.WriteCloser) context.Context {
return middleware.WithStackValue(ctx, eventStreamWriterKey{}, writeCloser)
}
// InitializeStreamWriter is a Finalize middleware that initializes an in-memory pipe for sending event stream messages
// via the HTTP request body.
type InitializeStreamWriter struct{}
// AddInitializeStreamWriter adds the InitializeStreamWriter middleware to the provided stack.
func AddInitializeStreamWriter(stack *middleware.Stack) error {
return stack.Finalize.Add(&InitializeStreamWriter{}, middleware.After)
}
// ID returns the identifier for the middleware.
func (i *InitializeStreamWriter) ID() string {
return "InitializeStreamWriter"
}
// HandleFinalize is the middleware implementation.
func (i *InitializeStreamWriter) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type: %T", in.Request)
}
inputReader, inputWriter := io.Pipe()
defer func() {
if err == nil {
return
}
_ = inputReader.Close()
_ = inputWriter.Close()
}()
request, err = request.SetStream(inputReader)
if err != nil {
return out, metadata, err
}
in.Request = request
ctx = setInputStreamWriter(ctx, inputWriter)
out, metadata, err = next.HandleFinalize(ctx, in)
if err != nil {
return out, metadata, err
}
return out, metadata, err
}
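Once InitializeStreamWriter has attached the pipe to the request, later code retrieves it with GetInputStreamWriter and streams encoded messages through the request body. A hypothetical helper sketching that usage (the function name and frame contents are illustrative, not part of this package):

package example

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi"
)

// writeEvents publishes already-encoded event stream frames through the pipe
// writer that InitializeStreamWriter stored on the context.
func writeEvents(ctx context.Context, frames [][]byte) error {
    w := eventstreamapi.GetInputStreamWriter(ctx)
    if w == nil {
        return fmt.Errorf("no input stream writer on context")
    }
    defer w.Close()
    for _, f := range frames {
        if _, err := w.Write(f); err != nil {
            return err
        }
    }
    return nil
}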

View File

@ -0,0 +1,13 @@
//go:build go1.18
// +build go1.18
package eventstreamapi
import smithyhttp "github.com/aws/smithy-go/transport/http"
// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
//
// This operation is a no-op for Go 1.18 and above.
func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
return nil
}

View File

@ -0,0 +1,12 @@
//go:build !go1.18
// +build !go1.18
package eventstreamapi
import smithyhttp "github.com/aws/smithy-go/transport/http"
// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event stream functionality.
func ApplyHTTPTransportFixes(r *smithyhttp.Request) error {
r.Header.Set("Expect", "100-continue")
return nil
}

View File

@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package eventstream
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.4.10"

View File

@ -3,6 +3,7 @@ package eventstream
import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
"fmt"
"io"
"strconv"
@ -487,7 +488,21 @@ func (UUIDValue) valueType() valueType {
}
func (v UUIDValue) String() string {
return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:])
var scratch [36]byte
const dash = '-'
hex.Encode(scratch[:8], v[0:4])
scratch[8] = dash
hex.Encode(scratch[9:13], v[4:6])
scratch[13] = dash
hex.Encode(scratch[14:18], v[6:8])
scratch[18] = dash
hex.Encode(scratch[19:23], v[8:10])
scratch[23] = dash
hex.Encode(scratch[24:], v[10:])
return string(scratch[:])
}
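The new String implementation hex-encodes each UUID field into a fixed 36-byte scratch buffer instead of calling fmt.Sprintf. A standalone sketch of the same formatting approach; note that hex.Encode emits lowercase digits, whereas the previous %X format produced uppercase:

package main

import (
    "encoding/hex"
    "fmt"
)

// formatUUID mirrors the scratch-buffer approach: hex-encode each field into a
// 36-byte array with dashes at the standard offsets.
func formatUUID(v [16]byte) string {
    var scratch [36]byte
    hex.Encode(scratch[:8], v[0:4])
    scratch[8] = '-'
    hex.Encode(scratch[9:13], v[4:6])
    scratch[13] = '-'
    hex.Encode(scratch[14:18], v[6:8])
    scratch[18] = '-'
    hex.Encode(scratch[19:23], v[8:10])
    scratch[23] = '-'
    hex.Encode(scratch[24:], v[10:])
    return string(scratch[:])
}

func main() {
    id := [16]byte{0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0}
    fmt.Println(formatUUID(id)) // 12345678-9abc-def0-1234-56789abcdef0
}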
// encode encodes the UUIDValue into an eventstream binary value

View File

@ -9,9 +9,9 @@ import (
// representation of a list of values of a fixed type. A serialized array might
// look like the following:
//
// ListName.member.1=foo
// &ListName.member.2=bar
// &Listname.member.3=baz
// ListName.member.1=foo
// &ListName.member.2=bar
// &Listname.member.3=baz
type Array struct {
// The query values to add the array to.
values url.Values
@ -36,20 +36,31 @@ type Array struct {
memberName string
// Elements are stored in values, so we keep track of the list size here.
size int32
// Empty lists are encoded as "<prefix>="; if we add a value later we will
// remove this encoding.
emptyValue Value
}
func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
emptyValue := newValue(values, prefix, flat)
emptyValue.String("")
return &Array{
values: values,
prefix: prefix,
flat: flat,
memberName: memberName,
emptyValue: emptyValue,
}
}
// Value adds a new element to the Query Array. Returns a Value type used to
// encode the array element.
func (a *Array) Value() Value {
if a.size == 0 {
delete(a.values, a.emptyValue.key)
}
// Query lists start at 1, so adjust the size first
a.size++
prefix := a.prefix
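With this change an empty list still produces a single "<prefix>=" pair, and the placeholder is deleted as soon as the first element is added. A rough illustration of the resulting wire shapes using net/url directly (the key names are hypothetical; the real member naming also depends on the flat and memberName settings):

package main

import (
    "fmt"
    "net/url"
)

func main() {
    values := url.Values{}

    // An empty list serializes as a single empty-valued key.
    values.Set("ListName", "")
    fmt.Println(values.Encode()) // ListName=

    // Once elements exist, the placeholder is dropped and members are
    // numbered starting at 1.
    values.Del("ListName")
    values.Set("ListName.member.1", "foo")
    values.Set("ListName.member.2", "bar")
    fmt.Println(values.Encode()) // ListName.member.1=foo&ListName.member.2=bar
}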

View File

@ -11,10 +11,10 @@ import (
// the values must all be of the same type, and that map entries are ordered.
// A serialized map might look like the following:
//
// MapName.entry.1.key=Foo
// &MapName.entry.1.value=spam
// &MapName.entry.2.key=Bar
// &MapName.entry.2.value=eggs
// MapName.entry.1.key=Foo
// &MapName.entry.1.value=spam
// &MapName.entry.2.key=Bar
// &MapName.entry.2.value=eggs
type Map struct {
// The query values to add the map to.
values url.Values

View File

@ -10,8 +10,8 @@ import (
// values where there is a fixed set of keys whose values each have their
// own known type. A serialized object might look like the following:
//
// ObjectName.Foo=value
// &ObjectName.Bar=5
// ObjectName.Foo=value
// &ObjectName.Bar=5
type Object struct {
// The query values to add the object to.
values url.Values

View File

@ -21,26 +21,18 @@ func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorCompone
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
return ErrorComponents(errResponse), nil
}
var errResponse wrappedErrorResponse
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
return ErrorComponents{
Code: errResponse.Code,
Message: errResponse.Message,
RequestID: errResponse.RequestID,
}, nil
return ErrorComponents(errResponse), nil
}
// noWrappedErrorResponse represents the error response body with
// no internal <Error></Error wrapping
// no internal Error wrapping
type noWrappedErrorResponse struct {
Code string `xml:"Code"`
Message string `xml:"Message"`
@ -48,7 +40,7 @@ type noWrappedErrorResponse struct {
}
// wrappedErrorResponse represents the error response body
// wrapped within <Error>...</Error>
// wrapped within Error
type wrappedErrorResponse struct {
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
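The conversion to ErrorComponents works because both response structs share the same field names and types; only the XML tags differ, and tags are ignored by Go struct conversions. A self-contained sketch of decoding the wrapped shape with the same struct tags (the XML body and values are made up):

package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

// wrapped reproduces the wrappedErrorResponse tags shown above.
type wrapped struct {
    Code      string `xml:"Error>Code"`
    Message   string `xml:"Error>Message"`
    RequestID string `xml:"RequestId"`
}

func main() {
    body := `<ErrorResponse>
  <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
  <RequestId>abc-123</RequestId>
</ErrorResponse>`

    var e wrapped
    if err := xml.NewDecoder(strings.NewReader(body)).Decode(&e); err != nil {
        panic(err)
    }
    fmt.Println(e.Code, e.Message, e.RequestID) // Throttling Rate exceeded abc-123
}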

View File

@ -30,10 +30,6 @@ func NewTokenRateLimit(tokens uint) *TokenRateLimit {
}
}
func isTimeoutError(error) bool {
return false
}
type canceledError struct {
Err error
}

View File

@ -93,7 +93,7 @@ func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
}
// MaxAttempts returns the maximum number of attempts that can be made for
// a attempt before failing. A value of 0 implies that the attempt should
// an attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable.
func (a *AdaptiveMode) MaxAttempts() int {
return a.retryer.MaxAttempts()
@ -127,7 +127,7 @@ func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
// GetAttemptToken returns the attempt token that can be used to rate limit
// attempt calls. Will be used by the SDK's retry package's Attempt
// middleware to get a attempt token prior to calling the temp and releasing
// middleware to get an attempt token prior to calling the temp and releasing
// the attempt token after the attempt has been made.
func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
for {

View File

@ -1,12 +1,12 @@
// Package retry provides interfaces and implementations for SDK request retry behavior.
//
// Retryer Interface and Implementations
// # Retryer Interface and Implementations
//
// This packages defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This packages provides a single
// retry implementations: Standard.
// This package defines Retryer interface that is used to either implement custom retry behavior
// or to extend the existing retry implementations provided by the SDK. This package provides a single
// retry implementation: Standard.
//
// Standard
// # Standard
//
// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
@ -15,66 +15,66 @@
//
// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
// a given error is retryable. By default this list of retryables includes the following:
// - Retrying errors that implement the RetryableError method, and return true.
// - Connection Errors
// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
// - Connection Reset Errors.
// - net.OpErr types that are dialing errors or are temporary.
// - HTTP Status Codes: 500, 502, 503, and 504.
// - API Error Codes
// - RequestTimeout, RequestTimeoutException
// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
// RequestThrottled, SlowDown, EC2ThrottledException
// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
// - TransactionInProgressException, PriorRequestNotComplete
// - Retrying errors that implement the RetryableError method, and return true.
// - Connection Errors
// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true.
// - Connection Reset Errors.
// - net.OpErr types that are dialing errors or are temporary.
// - HTTP Status Codes: 500, 502, 503, and 504.
// - API Error Codes
// - RequestTimeout, RequestTimeoutException
// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
// RequestThrottled, SlowDown, EC2ThrottledException
// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
// - TransactionInProgressException, PriorRequestNotComplete
//
// The standard retryer will not retry a request if the context associated with the request
// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context
// value.
//
// You can configure the standard retryer implementation to fit your applications by constructing a standard retryer
// using the NewStandard function, and providing one more functional arguments that mutate the StandardOptions
// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
// and the retry delay policy.
//
// For example to modify the default retry attempts for the standard retryer:
//
// // configure the custom retryer
// customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
// o.MaxAttempts = 5
// })
// // configure the custom retryer
// customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
// o.MaxAttempts = 5
// })
//
// // create a service client with the retryer
// s3.NewFromConfig(cfg, func(o *s3.Options) {
// o.Retryer = customRetry
// })
// // create a service client with the retryer
// s3.NewFromConfig(cfg, func(o *s3.Options) {
// o.Retryer = customRetry
// })
//
// Utilities
// # Utilities
//
// A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic
// way. These are:
//
// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
// in addition to those considered retryable by the provided retryer.
// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable
// in addition to those considered retryable by the provided retryer.
//
// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
// a retryer implementation.
// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping
// a retryer implementation.
//
// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
// request by wrapping a retryer implementation.
// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
// request by wrapping a retryer implementation.
//
// The following package functions have been provided to easily satisfy different retry interfaces to further customize
// a given retryer's behavior:
//
// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
// you can use this method to easily create custom back off policies to be used with the
// standard retryer.
// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
// you can use this method to easily create custom back off policies to be used with the
// standard retryer.
//
// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
// this can be used to extend the standard retryer to add additional logic ot determine if a
// error should be retried.
// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be retried.
//
// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be considered a timeout.
// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example,
// this can be used to extend the standard retryer to add additional logic to determine if an
// error should be considered a timeout.
package retry
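A sketch tying together the helpers listed above: a Standard retryer with a custom backoff via BackoffDelayerFunc, wrapped with AddWithErrorCodes and AddWithMaxAttempts. The error code and delays are arbitrary examples:

package example

import (
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/aws/retry"
)

// newRetryer composes the wrapper helpers described in the package docs.
func newRetryer() aws.Retryer {
    standard := retry.NewStandard(func(o *retry.StandardOptions) {
        // Linear backoff, purely for illustration.
        o.Backoff = retry.BackoffDelayerFunc(func(attempt int, err error) (time.Duration, error) {
            return time.Duration(attempt) * 100 * time.Millisecond, nil
        })
    })
    return retry.AddWithMaxAttempts(retry.AddWithErrorCodes(standard, "SlowDown"), 5)
}

As in the example above, the result is passed to a service client through its Options.Retryer field.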

View File

@ -11,7 +11,6 @@ import (
awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithymiddle "github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/transport/http"
)
@ -90,7 +89,7 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn
out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
// AttempResult Retried states that the attempt was not successful, and
// AttemptResult Retried states that the attempt was not successful, and
// should be retried.
shouldRetry := attemptResult.Retried
@ -292,7 +291,7 @@ type retryMetadataKey struct{}
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
return metadata, ok
}
@ -301,7 +300,7 @@ func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata)
return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata)
}
// AddRetryMiddlewaresOptions is the set of options that can be passed to

View File

@ -95,8 +95,13 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
var timeoutErr interface{ Timeout() bool }
var urlErr *url.Error
var netOpErr *net.OpError
var dnsError *net.DNSError
switch {
case errors.As(err, &dnsError):
// NXDOMAIN errors should not be retried
retryable = !dnsError.IsNotFound && dnsError.IsTemporary
case errors.As(err, &conErr) && conErr.ConnectionError():
retryable = true
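The new case treats DNS failures as retryable only when they are temporary and not NXDOMAIN. A small standalone check mirroring that rule (the hostnames are placeholders):

package main

import (
    "errors"
    "fmt"
    "net"
)

// isRetryableDNSError mirrors the dnsError branch added above: temporary DNS
// failures are retryable, NXDOMAIN (IsNotFound) is not.
func isRetryableDNSError(err error) bool {
    var dnsErr *net.DNSError
    if !errors.As(err, &dnsErr) {
        return false
    }
    return !dnsErr.IsNotFound && dnsErr.IsTemporary
}

func main() {
    nxdomain := &net.DNSError{Err: "no such host", Name: "missing.example.invalid", IsNotFound: true}
    timeout := &net.DNSError{Err: "i/o timeout", Name: "example.com", IsTemporary: true}
    fmt.Println(isRetryableDNSError(nxdomain)) // false
    fmt.Println(isRetryableDNSError(timeout))  // true
}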

View File

@ -49,7 +49,7 @@ type Retryer interface {
IsErrorRetryable(error) bool
// MaxAttempts returns the maximum number of attempts that can be made for
// a attempt before failing. A value of 0 implies that the attempt should
// an attempt before failing. A value of 0 implies that the attempt should
// be retried until it succeeds if the errors are retryable.
MaxAttempts() int
@ -66,7 +66,7 @@ type Retryer interface {
GetInitialToken() (releaseToken func(error) error)
}
// RetryerV2 is an interface to determine if a given error from a attempt
// RetryerV2 is an interface to determine if a given error from an attempt
// should be retried, and if so what backoff delay to apply. The default
// implementation used by most services is the retry package's Standard type.
// Which contains basic retry logic using exponential backoff.

View File

@ -7,6 +7,7 @@ var IgnoredHeaders = Rules{
"Authorization": struct{}{},
"User-Agent": struct{}{},
"X-Amzn-Trace-Id": struct{}{},
"Expect": struct{}{},
},
},
}

View File

@ -46,19 +46,35 @@ func StripExcessSpaces(str string) string {
return string(buf[:m])
}
// GetURIPath returns the escaped URI component from the provided URL
// GetURIPath returns the escaped URI component from the provided URL.
func GetURIPath(u *url.URL) string {
var uri string
var uriPath string
if len(u.Opaque) > 0 {
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
const schemeSep, pathSep, queryStart = "//", "/", "?"
opaque := u.Opaque
// Cut off the query string if present.
if idx := strings.Index(opaque, queryStart); idx >= 0 {
opaque = opaque[:idx]
}
// Cut out the scheme separator if present.
if strings.Index(opaque, schemeSep) == 0 {
opaque = opaque[len(schemeSep):]
}
// capture URI path starting with first path separator.
if idx := strings.Index(opaque, pathSep); idx >= 0 {
uriPath = opaque[idx:]
}
} else {
uri = u.EscapedPath()
uriPath = u.EscapedPath()
}
if len(uri) == 0 {
uri = "/"
if len(uriPath) == 0 {
uriPath = "/"
}
return uri
return uriPath
}
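A quick sketch of the two branches for illustration; the helper itself lives in the signer's internal package, so the logic is reproduced here rather than imported:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// getURIPath reproduces the logic above: prefer the raw Opaque component
// (minus query string and scheme separator), otherwise fall back to
// EscapedPath, defaulting to "/".
func getURIPath(u *url.URL) string {
    var uriPath string
    if len(u.Opaque) > 0 {
        opaque := u.Opaque
        if idx := strings.Index(opaque, "?"); idx >= 0 {
            opaque = opaque[:idx]
        }
        opaque = strings.TrimPrefix(opaque, "//")
        if idx := strings.Index(opaque, "/"); idx >= 0 {
            uriPath = opaque[idx:]
        }
    } else {
        uriPath = u.EscapedPath()
    }
    if len(uriPath) == 0 {
        uriPath = "/"
    }
    return uriPath
}

func main() {
    u, _ := url.Parse("https://example.com/bucket/key%20name?list-type=2")
    fmt.Println(getURIPath(u)) // /bucket/key%20name

    opaque := &url.URL{Scheme: "https", Opaque: "//example.com/some//raw/path?x=1"}
    fmt.Println(getURIPath(opaque)) // /some//raw/path
}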

View File

@ -82,7 +82,7 @@ func (m *dynamicPayloadSigningMiddleware) HandleBuild(
}
// if TLS is enabled, use unsigned payload when supported
if strings.EqualFold(req.URL.Scheme, "https") {
if req.IsHTTPS() {
return (&unsignedPayload{}).HandleBuild(ctx, in, next)
}
@ -371,13 +371,8 @@ func haveCredentialProvider(p aws.CredentialsProvider) bool {
if p == nil {
return false
}
switch p.(type) {
case aws.AnonymousCredentials,
*aws.AnonymousCredentials:
return false
}
return true
return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil))
}
type payloadHashKey struct{}

View File

@ -3,20 +3,22 @@
// Provides request signing for requests that need to be signed with
// AWS V4 Signatures.
//
// Standalone Signer
// # Standalone Signer
//
// Generally using the signer outside of the SDK should not require any additional
// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
// additional escaping you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
//
// "//<hostname>/<path>"
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
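A sketch of the Opaque convention being described, using hypothetical host and path values; url.URL.RequestURI shows what would actually be sent:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    u := &url.URL{
        Scheme: "https",
        Host:   "example.com",
        Opaque: "//example.com/some/path%2Fwith%2Fescapes",
    }
    // With Opaque set, the pre-escaped path is preserved verbatim.
    fmt.Println(u.RequestURI()) // https://example.com/some/path%2Fwith%2Fescapes
}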
@ -252,7 +254,7 @@ func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature
// request has no payload you should use the hex encoded SHA-256 of an empty
// string as the payloadHash value.
//
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
@ -311,7 +313,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht
// request has no payload you should use the hex encoded SHA-256 of an empty
// string as the payloadHash value.
//
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
//
// Some services such as Amazon S3 accept alternative values for the payload
// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be
@ -331,10 +333,10 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht
// parameter is not used by all AWS services, and is most notable used by
// Amazon S3 APIs.
//
// expires := 20 * time.Minute
// query := req.URL.Query()
// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)
// req.URL.RawQuery = query.Encode()
// expires := 20 * time.Minute
// query := req.URL.Query()
// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)
// req.URL.RawQuery = query.Encode()
//
// This method does not modify the provided request.
func (s *Signer) PresignHTTP(
@ -407,8 +409,8 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
headers = append(headers, hostHeader)
signed[hostHeader] = append(signed[hostHeader], host)
const contentLengthHeader = "content-length"
if length > 0 {
const contentLengthHeader = "content-length"
headers = append(headers, contentLengthHeader)
signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10))
}
@ -417,6 +419,10 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
if !rule.IsValid(k) {
continue // ignored header
}
if strings.EqualFold(k, contentLengthHeader) {
// prevent signing already handled content-length header.
continue
}
lowerCaseKey := strings.ToLower(k)
if _, ok := signed[lowerCaseKey]; ok {

View File

@ -1,3 +1,214 @@
# v1.18.25 (2023-05-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.24 (2023-05-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.23 (2023-05-04)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.22 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.21 (2023-04-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.20 (2023-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.19 (2023-03-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.18 (2023-03-16)
* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015.
# v1.18.17 (2023-03-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.16 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.15 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.14 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.13 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.12 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.11 (2023-02-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.10 (2023-01-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.9 (2023-01-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.8 (2023-01-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.7 (2022-12-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.6 (2022-12-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.5 (2022-12-15)
* **Bug Fix**: Unify logic between shared config and in finding home directory
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.4 (2022-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.3 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.2 (2022-11-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.1 (2022-11-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.18.0 (2022-11-11)
* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.11 (2022-11-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.10 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.9 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.8 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.7 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.6 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.5 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.4 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.3 (2022-08-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.2 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.1 (2022-08-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.0 (2022-08-14)
* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
# v1.16.1 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.0 (2022-08-10)
* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
# v1.15.17 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.16 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.15 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.14 (2022-07-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.13 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.12 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.11 (2022-06-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.10 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.9 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.8 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.7 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.6 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.5 (2022-05-09)
* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)

View File

@ -72,6 +72,10 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// implementations depend on or can be configured with earlier resolved
// configuration options.
resolveCredentials,
// Sets the resolved bearer authentication token API clients will use for
// httpBearerAuth authentication scheme.
resolveBearerAuthToken,
}
// A Config represents a generic configuration value or set of values. This type
@ -162,13 +166,12 @@ func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
// The custom configurations must satisfy the respective providers for their data
// or the custom data will be ignored by the resolvers and config loaders.
//
// cfg, err := config.LoadDefaultConfig( context.TODO(),
// WithSharedConfigProfile("test-profile"),
// )
// if err != nil {
// panic(fmt.Sprintf("failed loading config, %v", err))
// }
//
// cfg, err := config.LoadDefaultConfig( context.TODO(),
// WithSharedConfigProfile("test-profile"),
// )
// if err != nil {
// panic(fmt.Sprintf("failed loading config, %v", err))
// }
//
// The default configuration sources are:
// * Environment Variables

View File

@ -15,6 +15,6 @@
// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
//
// A number of helpers (prefixed by ``With``) are provided in this package that implement their respective provider
// A number of helpers (prefixed by “With”) are provided in this package that implement their respective provider
// interface. These helpers should be used for overriding configuration programmatically at runtime.
package config

View File

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.15.5"
const goModuleVersion = "1.18.25"

View File

@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
@ -28,6 +29,9 @@ type LoadOptions struct {
// Credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// Token provider for authentication operations with bearer authentication.
BearerAuthTokenProvider smithybearer.TokenProvider
// HTTPClient the SDK's API clients will use to invoke HTTP requests.
HTTPClient HTTPClient
@ -128,6 +132,14 @@ type LoadOptions struct {
// aws.CredentialsCacheOptions
CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
// BearerAuthTokenCacheOptions is a function for setting the smithy-go
// auth/bearer#TokenCacheOptions
BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
// SSOTokenProviderOptions is a function for setting the
// credentials/ssocreds.SSOTokenProviderOptions
SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
// ProcessCredentialOptions is a function for setting
// the processcreds.Options
ProcessCredentialOptions func(*processcreds.Options)
@ -451,6 +463,73 @@ func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptio
}
}
// getBearerAuthTokenProvider returns the bearer authentication token provider value
func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
if o.BearerAuthTokenProvider == nil {
return nil, false, nil
}
return o.BearerAuthTokenProvider, true, nil
}
// WithBearerAuthTokenProvider is a helper function to construct functional options
// that sets the bearer auth token provider value on config's LoadOptions. If the token
// provider is set to nil, the token provider value will be ignored.
// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
// the previous call values.
func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.BearerAuthTokenProvider = v
return nil
}
}
// getBearerAuthTokenCacheOptions returns the wrapped function to set smithybearer.TokenCacheOptions
func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
if o.BearerAuthTokenCacheOptions == nil {
return nil, false, nil
}
return o.BearerAuthTokenCacheOptions, true, nil
}
// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
// that sets a function to modify the TokenCacheOptions the smithy-go
// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
// the configuration loader.
//
// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
// the previous call values.
func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.BearerAuthTokenCacheOptions = v
return nil
}
}
// getSSOTokenProviderOptions returns the wrapped function to set ssocreds.SSOTokenProviderOptions
func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
if o.SSOTokenProviderOptions == nil {
return nil, false, nil
}
return o.SSOTokenProviderOptions, true, nil
}
// WithSSOTokenProviderOptions is a helper function to construct functional
// options that sets a function to modify the SSOTokenProviderOptions the SDK's
// credentials/ssocreds#SSOTokenProvider will be configured with, if the
// SSOTokenProvider is used by the configuration loader.
//
// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
// the previous call values.
func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.SSOTokenProviderOptions = v
return nil
}
}
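A hedged sketch of wiring these options into LoadDefaultConfig, assuming smithy-go's bearer.StaticTokenProvider and a made-up token value:

package main

import (
    "context"
    "log"
    "time"

    "github.com/aws/aws-sdk-go-v2/config"
    smithybearer "github.com/aws/smithy-go/auth/bearer"
)

func main() {
    token := smithybearer.StaticTokenProvider{
        Token: smithybearer.Token{
            Value:     "example-token",
            CanExpire: true,
            Expires:   time.Now().Add(1 * time.Hour),
        },
    }

    cfg, err := config.LoadDefaultConfig(context.TODO(),
        config.WithBearerAuthTokenProvider(token),
    )
    if err != nil {
        log.Fatal(err)
    }
    _ = cfg // cfg.BearerAuthTokenProvider is now set for clients that use bearer auth.
}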
// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
if o.ProcessCredentialOptions == nil {

View File

@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
@ -185,6 +186,73 @@ func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
return
}
// bearerAuthTokenProviderProvider provides access to the bearer authentication
// token external configuration value.
type bearerAuthTokenProviderProvider interface {
getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
}
// getBearerAuthTokenProvider searches the config sources for a
// bearerAuthTokenProviderProvider and returns the value if found. Returns an
// error if a provider fails before a value is found.
func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
for _, cfg := range configs {
if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
p, found, err = provider.getBearerAuthTokenProvider(ctx)
if err != nil || found {
break
}
}
}
return
}
// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
// setting the smithy-go auth/bearer#TokenCacheOptions.
type bearerAuthTokenCacheOptionsProvider interface {
getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
}
// getBearerAuthTokenCacheOptions searches the config sources for a function for
// setting the smithy-go auth/bearer#TokenCacheOptions.
func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
f func(*smithybearer.TokenCacheOptions), found bool, err error,
) {
for _, config := range configs {
if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
if err != nil || found {
break
}
}
}
return
}
// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
type ssoTokenProviderOptionsProvider interface {
getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
}
// getSSOTokenProviderOptions searches the config sources for a function for
// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
) {
for _, config := range configs {
if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
f, found, err = p.getSSOTokenProviderOptions(ctx)
if err != nil || found {
break
}
}
}
return
}
// processCredentialOptions is an interface for retrieving a function for setting
// the processcreds.Options.
type processCredentialOptions interface {

View File

@ -0,0 +1,122 @@
package config
import (
"context"
"fmt"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/service/ssooidc"
smithybearer "github.com/aws/smithy-go/auth/bearer"
)
// resolveBearerAuthToken extracts a token provider from the config sources.
//
// If an explicit bearer authentication token provider is not found the
// resolver will fallback to resolving token provider via other config sources
// such as SharedConfig.
func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {
found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)
if found || err != nil {
return err
}
return resolveBearerAuthTokenProviderChain(ctx, cfg, configs)
}
// resolveBearerAuthTokenProvider extracts the first instance of
// BearerAuthTokenProvider from the config sources.
//
// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure
// the Token is only refreshed when needed. This also protects the
// TokenProvider so it can be used concurrently.
//
// Config providers used:
// * bearerAuthTokenProviderProvider
func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs)
if !found || err != nil {
return false, err
}
cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
ctx, configs, tokenProvider)
if err != nil {
return false, err
}
return true, nil
}
func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
_, sharedConfig, _ := getAWSConfigSources(configs)
var provider smithybearer.TokenProvider
if sharedConfig.SSOSession != nil {
provider, err = resolveBearerAuthSSOTokenProvider(
ctx, cfg, sharedConfig.SSOSession, configs)
}
if err == nil && provider != nil {
cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
ctx, configs, provider)
}
return err
}
func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) {
ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
if err != nil {
return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
}
var optFns []func(*ssocreds.SSOTokenProviderOptions)
if found {
optFns = append(optFns, ssoTokenProviderOptionsFn)
}
cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name)
if err != nil {
return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err)
}
client := ssooidc.NewFromConfig(*cfg)
provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...)
return provider, nil
}
// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go
// bearer/auth#TokenCache with the provided options if the provider is not
// already a TokenCache.
func wrapWithBearerAuthTokenCache(
ctx context.Context,
cfgs configs,
provider smithybearer.TokenProvider,
optFns ...func(*smithybearer.TokenCacheOptions),
) (smithybearer.TokenProvider, error) {
_, ok := provider.(*smithybearer.TokenCache)
if ok {
return provider, nil
}
tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs)
if err != nil {
return nil, err
}
opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns))
opts = append(opts, func(o *smithybearer.TokenCacheOptions) {
o.RefreshBeforeExpires = 5 * time.Minute
o.RetrieveBearerTokenTimeout = 30 * time.Second
})
opts = append(opts, optFns...)
if optionsFound {
opts = append(opts, tokenCacheConfigOptions)
}
return smithybearer.NewTokenCache(provider, opts...), nil
}

View File

@ -15,6 +15,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/sso"
"github.com/aws/aws-sdk-go-v2/service/ssooidc"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
@ -29,25 +30,19 @@ var (
ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
)
// resolveCredentials extracts a credential provider from slice of config sources.
// resolveCredentials extracts a credential provider from slice of config
// sources.
//
// If an explict credential provider is not found the resolver will fallback to resolving
// credentials by extracting a credential provider from EnvConfig and SharedConfig.
// If an explicit credential provider is not found the resolver will fallback
// to resolving credentials by extracting a credential provider from EnvConfig
// and SharedConfig.
func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
found, err := resolveCredentialProvider(ctx, cfg, configs)
if err != nil {
return err
}
if found {
return nil
}
err = resolveCredentialChain(ctx, cfg, configs)
if err != nil {
if found || err != nil {
return err
}
return nil
return resolveCredentialChain(ctx, cfg, configs)
}
// resolveCredentialProvider extracts the first instance of Credentials from the
@ -61,12 +56,9 @@ func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) e
// * credentialsProviderProvider
func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
credProvider, found, err := getCredentialsProvider(ctx, configs)
if err != nil {
if !found || err != nil {
return false, err
}
if !found {
return false, nil
}
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
if err != nil {
@ -180,7 +172,30 @@ func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *S
}
cfgCopy := cfg.Copy()
cfgCopy.Region = sharedConfig.SSORegion
if sharedConfig.SSOSession != nil {
ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
if err != nil {
return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
}
var optFns []func(*ssocreds.SSOTokenProviderOptions)
if found {
optFns = append(optFns, ssoTokenProviderOptionsFn)
}
cfgCopy.Region = sharedConfig.SSOSession.SSORegion
cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name)
if err != nil {
return err
}
oidcClient := ssooidc.NewFromConfig(cfgCopy)
tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...)
options = append(options, func(o *ssocreds.Options) {
o.SSOTokenProvider = tokenProvider
o.CachedTokenFilepath = cachedPath
})
} else {
cfgCopy.Region = sharedConfig.SSORegion
}
cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
@ -369,10 +384,6 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro
return fmt.Errorf("token file path is not set")
}
if len(roleARN) == 0 {
return fmt.Errorf("role ARN is not set")
}
optFns := []func(*stscreds.WebIdentityRoleOptions){
func(options *stscreds.WebIdentityRoleOptions) {
options.RoleSessionName = sessionName
@ -383,11 +394,29 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro
if err != nil {
return err
}
if found {
optFns = append(optFns, optFn)
}
provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
opts := stscreds.WebIdentityRoleOptions{
RoleARN: roleARN,
}
for _, fn := range optFns {
fn(&opts)
}
if len(opts.RoleARN) == 0 {
return fmt.Errorf("role ARN is not set")
}
client := opts.Client
if client == nil {
client = sts.NewFromConfig(*cfg)
}
provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
cfg.Credentials = provider
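The same provider can be constructed directly when the role and token file come from application code rather than shared config; a sketch with a hypothetical ARN, token path, and session name:

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
    "github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    provider := stscreds.NewWebIdentityRoleProvider(
        sts.NewFromConfig(cfg),
        "arn:aws:iam::123456789012:role/example-role",        // hypothetical role ARN
        stscreds.IdentityTokenFile("/var/run/secrets/token"), // hypothetical token path
        func(o *stscreds.WebIdentityRoleOptions) {
            o.RoleSessionName = "example-session"
        },
    )

    // Wrap in the SDK's cache so the assumed-role credentials are refreshed as needed.
    cfg.Credentials = aws.NewCredentialsCache(provider)
}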
@ -454,7 +483,7 @@ func wrapWithCredentialsCache(
return provider, nil
}
credCacheOptions, found, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
if err != nil {
return nil, err
}
@ -462,7 +491,7 @@ func wrapWithCredentialsCache(
// force allocation of a new slice if the additional options are
// needed, to prevent overwriting the passed in slice of options.
optFns = optFns[:len(optFns):len(optFns)]
if found {
if optionsFound {
optFns = append(optFns, credCacheOptions)
}

View File

@ -15,13 +15,19 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/internal/ini"
"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
"github.com/aws/smithy-go/logging"
)
const (
// Prefix to use for filtering profiles
// Prefix to use for filtering profiles. The profile prefix should only
// exist in the shared config file, not the credentials file.
profilePrefix = `profile `
// Prefix to be used for SSO sections. These are supposed to only exist in
// the shared config file, not the credentials file.
ssoSectionPrefix = `sso-session `
// string equivalent for boolean
endpointDiscoveryDisabled = `false`
endpointDiscoveryEnabled = `true`
@ -42,10 +48,13 @@ const (
roleDurationSecondsKey = "duration_seconds" // optional
// AWS Single Sign-On (AWS SSO) group
ssoSessionNameKey = "sso_session"
ssoRegionKey = "sso_region"
ssoStartURLKey = "sso_start_url"
ssoAccountIDKey = "sso_account_id"
ssoRegionKey = "sso_region"
ssoRoleNameKey = "sso_role_name"
ssoStartURL = "sso_start_url"
// Additional Config fields
regionKey = `region`
@ -99,7 +108,7 @@ var defaultSharedConfigProfile = DefaultSharedConfigProfile
// - Linux/Unix: $HOME/.aws/credentials
// - Windows: %USERPROFILE%\.aws\credentials
func DefaultSharedCredentialsFilename() string {
return filepath.Join(userHomeDir(), ".aws", "credentials")
return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
}
// DefaultSharedConfigFilename returns the SDK's default file path for
@ -110,7 +119,7 @@ func DefaultSharedCredentialsFilename() string {
// - Linux/Unix: $HOME/.aws/config
// - Windows: %USERPROFILE%\.aws\config
func DefaultSharedConfigFilename() string {
return filepath.Join(userHomeDir(), ".aws", "config")
return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
}
// DefaultSharedConfigFiles is a slice of the default shared config files that
@ -119,12 +128,26 @@ var DefaultSharedConfigFiles = []string{
DefaultSharedConfigFilename(),
}
// DefaultSharedCredentialsFiles is a slice of the default shared credentials files that
// the will be used in order to load the SharedConfig.
// DefaultSharedCredentialsFiles is a slice of the default shared credentials
// files that will be used in order to load the SharedConfig.
var DefaultSharedCredentialsFiles = []string{
DefaultSharedCredentialsFilename(),
}
// SSOSession provides the shared configuration parameters of the sso-session
// section.
type SSOSession struct {
Name string
SSORegion string
SSOStartURL string
}
func (s *SSOSession) setFromIniSection(section ini.Section) {
updateString(&s.Name, section, ssoSessionNameKey)
updateString(&s.SSORegion, section, ssoRegionKey)
updateString(&s.SSOStartURL, section, ssoStartURLKey)
}
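These fields correspond to an sso-session section in the shared config file; a hypothetical example of the layout the loader expects (all names and values are made up):

[profile dev]
sso_session = my-sso
sso_account_id = 123456789012
sso_role_name = ReadOnly
region = us-east-1

[sso-session my-sso]
sso_region = us-east-1
sso_start_url = https://my-sso-portal.awsapps.com/start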
// SharedConfig represents the configuration fields of the SDK config files.
type SharedConfig struct {
Profile string
@ -144,10 +167,17 @@ type SharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
// SSO session options
SSOSessionName string
SSOSession *SSOSession
// Legacy SSO session options
SSORegion string
SSOStartURL string
// SSO fields not used
SSOAccountID string
SSORegion string
SSORoleName string
SSOStartURL string
RoleARN string
ExternalID string
@ -463,7 +493,6 @@ type LoadSharedConfigOptions struct {
//
// You can read more about shared config and credentials file location at
// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location
//
func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) {
var option LoadSharedConfigOptions
for _, fn := range optFns {
@ -485,7 +514,7 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
}
// check for profile prefix and drop duplicates or invalid profiles
err = processConfigSections(ctx, configSections, option.Logger)
err = processConfigSections(ctx, &configSections, option.Logger)
if err != nil {
return SharedConfig{}, err
}
@ -497,12 +526,12 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
}
// check for profile prefix and drop duplicates or invalid profiles
err = processCredentialsSections(ctx, credentialsSections, option.Logger)
err = processCredentialsSections(ctx, &credentialsSections, option.Logger)
if err != nil {
return SharedConfig{}, err
}
err = mergeSections(configSections, credentialsSections)
err = mergeSections(&configSections, credentialsSections)
if err != nil {
return SharedConfig{}, err
}
@ -516,53 +545,73 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
return cfg, nil
}
func processConfigSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
skipSections := map[string]struct{}{}
for _, section := range sections.List() {
// drop profiles without prefix for config files
if !strings.HasPrefix(section, profilePrefix) && !strings.EqualFold(section, "default") {
if _, ok := skipSections[section]; ok {
continue
}
// drop sections from config file that do not have expected prefixes.
switch {
case strings.HasPrefix(section, profilePrefix):
// Rename sections to remove "profile " prefixing to match with
// credentials file. If default is already present, it will be
// dropped.
newName, err := renameProfileSection(section, sections, logger)
if err != nil {
return fmt.Errorf("failed to rename profile section, %w", err)
}
skipSections[newName] = struct{}{}
case strings.HasPrefix(section, ssoSectionPrefix):
case strings.EqualFold(section, "default"):
default:
// drop this section, as invalid profile name
sections.DeleteSection(section)
if logger != nil {
logger.Logf(logging.Debug,
"A profile defined with name `%v` is ignored. For use within a shared configuration file, "+
"a non-default profile must have `profile ` prefixed to the profile name.\n",
logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+
"For use within a shared configuration file, "+
"a non-default profile must have `profile ` "+
"prefixed to the profile name.",
section,
)
}
}
}
// rename sections to remove `profile ` prefixing to match with credentials file.
// if default is already present, it will be dropped.
for _, section := range sections.List() {
if strings.HasPrefix(section, profilePrefix) {
v, ok := sections.GetSection(section)
if !ok {
return fmt.Errorf("error processing profiles within the shared configuration files")
}
// delete section with profile as prefix
sections.DeleteSection(section)
// set the value to non-prefixed name in sections.
section = strings.TrimPrefix(section, profilePrefix)
if sections.HasSection(section) {
oldSection, _ := sections.GetSection(section)
v.Logs = append(v.Logs,
fmt.Sprintf("A default profile prefixed with `profile ` found in %s, "+
"overrided non-prefixed default profile from %s", v.SourceFile, oldSection.SourceFile))
}
// assign non-prefixed name to section
v.Name = section
sections.SetSection(section, v)
}
}
return nil
}
func processCredentialsSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) {
v, ok := sections.GetSection(section)
if !ok {
return "", fmt.Errorf("error processing profiles within the shared configuration files")
}
// delete section with profile as prefix
sections.DeleteSection(section)
// set the value to non-prefixed name in sections.
section = strings.TrimPrefix(section, profilePrefix)
if sections.HasSection(section) {
oldSection, _ := sections.GetSection(section)
v.Logs = append(v.Logs,
fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
"overriding non-default profile from %s",
v.SourceFile, oldSection.SourceFile))
sections.DeleteSection(section)
}
// assign non-prefixed name to section
v.Name = section
sections.SetSection(section, v)
return section, nil
}
func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
for _, section := range sections.List() {
// drop profiles with prefix for credential files
if strings.HasPrefix(section, profilePrefix) {
@ -596,7 +645,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) {
}
// mergeSections into mergedSections
err = mergeSections(mergedSections, sections)
err = mergeSections(&mergedSections, sections)
if err != nil {
return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
}
@ -606,7 +655,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) {
}
// mergeSections merges source section properties into destination section properties
func mergeSections(dst, src ini.Sections) error {
func mergeSections(dst *ini.Sections, src ini.Sections) error {
for _, sectionName := range src.List() {
srcSection, _ := src.GetSection(sectionName)
@ -680,6 +729,13 @@ func mergeSections(dst, src ini.Sections) error {
useFIPSEndpointKey,
defaultsModeKey,
retryModeKey,
caBundleKey,
ssoSessionNameKey,
ssoAccountIDKey,
ssoRegionKey,
ssoRoleNameKey,
ssoStartURLKey,
}
for i := range stringKeys {
if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
@ -698,7 +754,7 @@ func mergeSections(dst, src ini.Sections) error {
}
// set srcSection on dst srcSection
dst = dst.SetSection(sectionName, dstSection)
*dst = dst.SetSection(sectionName, dstSection)
}
return nil
@ -769,7 +825,7 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
}
}
// set config from the provided ini section
// set config from the provided INI section
err := c.setFromIniSection(profile, section)
if err != nil {
return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
@ -782,9 +838,8 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
// profile only have credential provider options.
c.clearAssumeRoleOptions()
} else {
// First time a profile has been seen, It must either be a assume role
// credentials, or SSO. Assert if the credential type requires a role ARN,
// the ARN is also set, or validate that the SSO configuration is complete.
// First time a profile has been seen. Assert if the credential type
// requires a role ARN, that the ARN is also set.
if err := c.validateCredentialsConfig(profile); err != nil {
return err
}
@ -832,11 +887,26 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
c.Source = srcCfg
}
// If the profile contains an SSO session parameter, the session MUST exist
// as a section in the config file. Load the SSO session using the name
// provided. If the session section is not found or incomplete an error
// will be returned.
if c.hasSSOTokenProviderConfiguration() {
section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName))
if !ok {
return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName)
}
var ssoSession SSOSession
ssoSession.setFromIniSection(section)
ssoSession.Name = c.SSOSessionName
c.SSOSession = &ssoSession
}
return nil
}
// setFromIniSection loads the configuration from the profile section defined in
// the provided ini file. A SharedConfig pointer type value is used so that
// the provided INI file. A SharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
@ -871,10 +941,16 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.Region, section, regionKey)
// AWS Single Sign-On (AWS SSO)
updateString(&c.SSOAccountID, section, ssoAccountIDKey)
// SSO session options
updateString(&c.SSOSessionName, section, ssoSessionNameKey)
// Legacy SSO session options
updateString(&c.SSORegion, section, ssoRegionKey)
updateString(&c.SSOStartURL, section, ssoStartURLKey)
// SSO fields not used
updateString(&c.SSOAccountID, section, ssoAccountIDKey)
updateString(&c.SSORoleName, section, ssoRoleNameKey)
updateString(&c.SSOStartURL, section, ssoStartURL)
if section.Has(roleDurationSecondsKey) {
d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
@ -992,32 +1068,47 @@ func (c *SharedConfig) validateCredentialType() error {
len(c.CredentialProcess) != 0,
len(c.WebIdentityTokenFile) != 0,
) {
return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso")
return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
}
return nil
}
func (c *SharedConfig) validateSSOConfiguration() error {
if !c.hasSSOConfiguration() {
if c.hasSSOTokenProviderConfiguration() {
err := c.validateSSOTokenProviderConfiguration()
if err != nil {
return err
}
return nil
}
if c.hasLegacySSOConfiguration() {
err := c.validateLegacySSOConfiguration()
if err != nil {
return err
}
}
return nil
}
func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
var missing []string
if len(c.SSOAccountID) == 0 {
missing = append(missing, ssoAccountIDKey)
if len(c.SSOSessionName) == 0 {
missing = append(missing, ssoSessionNameKey)
}
if len(c.SSORegion) == 0 {
missing = append(missing, ssoRegionKey)
}
if c.SSOSession == nil {
missing = append(missing, ssoSectionPrefix)
} else {
if len(c.SSOSession.SSORegion) == 0 {
missing = append(missing, ssoRegionKey)
}
if len(c.SSORoleName) == 0 {
missing = append(missing, ssoRoleNameKey)
}
if len(c.SSOStartURL) == 0 {
missing = append(missing, ssoStartURL)
if len(c.SSOSession.SSOStartURL) == 0 {
missing = append(missing, ssoStartURLKey)
}
}
if len(missing) > 0 {
@ -1025,6 +1116,40 @@ func (c *SharedConfig) validateSSOConfiguration() error {
c.Profile, strings.Join(missing, ", "))
}
if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
}
if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
}
return nil
}
func (c *SharedConfig) validateLegacySSOConfiguration() error {
var missing []string
if len(c.SSORegion) == 0 {
missing = append(missing, ssoRegionKey)
}
if len(c.SSOStartURL) == 0 {
missing = append(missing, ssoStartURLKey)
}
if len(c.SSOAccountID) == 0 {
missing = append(missing, ssoAccountIDKey)
}
if len(c.SSORoleName) == 0 {
missing = append(missing, ssoRoleNameKey)
}
if len(missing) > 0 {
return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
c.Profile, strings.Join(missing, ", "))
}
return nil
}
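// A minimal sketch of the two shared-config shapes the validators above
// accept; the first profile uses the sso-session (token provider) form, the
// second the legacy form. Profile and session names are illustrative only.
//
// [profile dev]
// sso_session = dev-session
// sso_account_id = 123456789012
// sso_role_name = SSOReadOnlyRole
//
// [sso-session dev-session]
// sso_start_url = https://my-sso-portal.awsapps.com/start
// sso_region = us-east-1
//
// [profile dev-legacy]
// sso_start_url = https://my-sso-portal.awsapps.com/start
// sso_region = us-east-1
// sso_account_id = 123456789012
// sso_role_name = SSOReadOnlyRole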
@ -1044,15 +1169,15 @@ func (c *SharedConfig) hasCredentials() bool {
}
func (c *SharedConfig) hasSSOConfiguration() bool {
switch {
case len(c.SSOAccountID) != 0:
case len(c.SSORegion) != 0:
case len(c.SSORoleName) != 0:
case len(c.SSOStartURL) != 0:
default:
return false
}
return true
return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
}
func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
return len(c.SSOSessionName) > 0
}
func (c *SharedConfig) hasLegacySSOConfiguration() bool {
return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
}
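// A minimal sketch of resolving an SSO-enabled profile from application code;
// the profile name "dev" is a placeholder.
//
// cfg, err := config.LoadDefaultConfig(context.TODO(),
// 	config.WithSharedConfigProfile("dev"))
// if err != nil {
// 	return err
// }
// creds, err := cfg.Credentials.Retrieve(context.TODO())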
func (c *SharedConfig) clearAssumeRoleOptions() {
@ -1143,12 +1268,6 @@ func (e CredentialRequiresARNError) Error() string {
)
}
func userHomeDir() string {
// Ignore errors since we only care about Windows and *nix.
homedir, _ := os.UserHomeDir()
return homedir
}
func oneOrNone(bs ...bool) bool {
var count int

View File

@ -1,3 +1,201 @@
# v1.13.24 (2023-05-09)
* No change notes available for this release.
# v1.13.23 (2023-05-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.22 (2023-05-04)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.21 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.20 (2023-04-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.19 (2023-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.18 (2023-03-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.17 (2023-03-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.16 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.15 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.14 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.13 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.12 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.11 (2023-02-01)
* No change notes available for this release.
# v1.13.10 (2023-01-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.9 (2023-01-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.8 (2023-01-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.7 (2022-12-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.6 (2022-12-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.5 (2022-12-15)
* **Bug Fix**: Unify logic between shared config and in finding home directory
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.4 (2022-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.3 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.2 (2022-11-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.1 (2022-11-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.0 (2022-11-11)
* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
# v1.12.24 (2022-11-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.23 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.22 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.21 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.20 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.19 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.18 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.17 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.16 (2022-08-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.15 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.14 (2022-08-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.13 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.12 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.11 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.10 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.9 (2022-07-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.8 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.7 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.6 (2022-06-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.5 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.4 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.3 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.2 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.1 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-04-25)
* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
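A sketch of the Duration and Policy options from the v1.12.0 entry above, assuming they are fields on stscreds.WebIdentityRoleOptions as the change note describes; the role ARN, token path, and policy document are placeholders.

    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    provider := stscreds.NewWebIdentityRoleProvider(
        sts.NewFromConfig(cfg),
        "arn:aws:iam::123456789012:role/my-role",
        stscreds.IdentityTokenFile("/path/to/web-identity-token"),
        func(o *stscreds.WebIdentityRoleOptions) {
            o.Duration = 30 * time.Minute // option named in the change note
            o.Policy = aws.String(`{"Version":"2012-10-17","Statement":[]}`)
        },
    )
    cfg.Credentials = aws.NewCredentialsCache(provider)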

View File

@ -1,27 +1,27 @@
// Package ec2rolecreds provides the credentials provider implementation for
// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
//
// Concurrency and caching
// # Concurrency and caching
//
// The Provider is not safe to be used concurrently, and does not provide any
// caching of credentials retrieved. You should wrap the Provider with a
// `aws.CredentialsCache` to provide concurrency safety, and caching of
// credentials.
//
// Loading credentials with the SDK's AWS Config
// # Loading credentials with the SDK's AWS Config
//
// The EC2 Instance role credentials provider will automatically be the resolved
// credential provider int he credential chain if no other credential provider is
// credential provider in the credential chain if no other credential provider is
// resolved first.
//
// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
// role for credentials, you specify a `credentials_source` property in the config
// profile the SDK will load.
//
// [default]
// credential_source = Ec2InstanceMetadata
// [default]
// credential_source = Ec2InstanceMetadata
//
// Loading credentials with the Provider directly
// # Loading credentials with the Provider directly
//
// Another way to use the EC2 Instance role credentials provider is to create it
// directly and assign it as the credentials provider for an API client.
@ -30,28 +30,28 @@
// it with the CredentialsCache before assigning the provider to the Amazon S3 API
// client's Credentials option.
//
// provider := imds.New(imds.Options{})
// provider := imds.New(imds.Options{})
//
// // Create the service client value configured for credentials.
// svc := s3.New(s3.Options{
// Credentials: aws.NewCredentialsCache(provider),
// })
// // Create the service client value configured for credentials.
// svc := s3.New(s3.Options{
// Credentials: aws.NewCredentialsCache(provider),
// })
//
// If you need more control, you can set the configuration options on the
// credentials provider using the imds.Options type to configure the EC2 IMDS
// API Client and ExpiryWindow of the retrieved credentials.
//
// provider := imds.New(imds.Options{
// // See imds.Options type's documentation for more options available.
// Client: imds.New(Options{
// HTTPClient: customHTTPClient,
// }),
// provider := imds.New(imds.Options{
// // See imds.Options type's documentation for more options available.
// Client: imds.New(Options{
// HTTPClient: customHTTPClient,
// }),
//
// // Modify how soon credentials expire prior to their original expiry time.
// ExpiryWindow: 5 * time.Minute,
// })
// // Modify how soon credentials expire prior to their original expiry time.
// ExpiryWindow: 5 * time.Minute,
// })
//
// EC2 IMDS API Client
// # EC2 IMDS API Client
//
// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
// configuring the client, and options available.

View File

@ -33,9 +33,9 @@ type GetMetadataAPIClient interface {
//
// The New function must be used to create the with a custom EC2 IMDS client.
//
// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{
// o.Client = imds.New(imds.Options{/* custom options */})
// })
// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{
// o.Client = imds.New(imds.Options{/* custom options */})
// })
type Provider struct {
options Options
}

View File

@ -7,26 +7,29 @@
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// }
//
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// "Token" : "AQoDY....=",
// "Expiration" : "2016-02-25T06:03:31Z"
// }
//
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// "Token" : "AQoDY....=",
// "Expiration" : "2016-02-25T06:03:31Z"
// }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
// {
// "code": "ErrorCode",
// "message": "Helpful error message."
// }
//
// {
// "code": "ErrorCode",
// "message": "Helpful error message."
// }
package endpointcreds
import (

View File

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.12.0"
const goModuleVersion = "1.13.24"

View File

@ -7,14 +7,14 @@
// option, you should make sure that the config file is as locked down as possible
// using security best practices for your operating system.
//
// Concurrency and caching
// # Concurrency and caching
//
// The Provider is not safe to be used concurrently, and does not provide any
// caching of credentials retrieved. You should wrap the Provider with a
// `aws.CredentialsCache` to provide concurrency safety, and caching of
// credentials.
//
// Loading credentials with the SDKs AWS Config
// # Loading credentials with the SDKs AWS Config
//
// You can use credentials from a AWS shared config `credential_process` in a
// variety of ways.
@ -24,20 +24,20 @@
// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
//
// [default]
// credential_process = /command/to/call
// [default]
// credential_process = /command/to/call
//
// Loading the external configuration will use the credential process to
// retrieve credentials. NOTE: If there are credentials in the profile you are
// using, the credential process will not be used.
//
// // Initialize a session to load credentials.
// cfg, _ := config.LoadDefaultConfig(context.TODO())
// // Initialize a session to load credentials.
// cfg, _ := config.LoadDefaultConfig(context.TODO())
//
// // Create S3 service client to use the credentials.
// svc := s3.NewFromConfig(cfg)
// // Create S3 service client to use the credentials.
// svc := s3.NewFromConfig(cfg)
//
// Loading credentials with the Provider directly
// # Loading credentials with the Provider directly
//
// Another way to use the credentials process provider is by using the
// `NewProvider` constructor to create the provider and providing it with a
@ -47,46 +47,46 @@
// it with the CredentialsCache before assigning the provider to the Amazon S3 API
// client's Credentials option.
//
// // Create credentials using the Provider.
// provider := processcreds.NewProvider("/path/to/command")
// // Create credentials using the Provider.
// provider := processcreds.NewProvider("/path/to/command")
//
// // Create the service client value configured for credentials.
// svc := s3.New(s3.Options{
// Credentials: aws.NewCredentialsCache(provider),
// })
// // Create the service client value configured for credentials.
// svc := s3.New(s3.Options{
// Credentials: aws.NewCredentialsCache(provider),
// })
//
// If you need more control, you can set any configurable options in the
// credentials using one or more option functions.
//
// provider := processcreds.NewProvider("/path/to/command",
// func(o *processcreds.Options) {
// // Override the provider's default timeout
// o.Timeout = 2 * time.Minute
// })
// provider := processcreds.NewProvider("/path/to/command",
// func(o *processcreds.Options) {
// // Override the provider's default timeout
// o.Timeout = 2 * time.Minute
// })
//
// You can also use your own `exec.Cmd` value by providing a value that satisfies
// the `NewCommandBuilder` interface and using the `NewProviderCommand` constructor.
//
// // Create an exec.Cmd
// cmdBuilder := processcreds.NewCommandBuilderFunc(
// func(ctx context.Context) (*exec.Cmd, error) {
// cmd := exec.CommandContext(ctx,
// "customCLICommand",
// "-a", "argument",
// )
// cmd.Env = []string{
// "ENV_VAR_FOO=value",
// "ENV_VAR_BAR=other_value",
// }
// // Create an exec.Cmd
// cmdBuilder := processcreds.NewCommandBuilderFunc(
// func(ctx context.Context) (*exec.Cmd, error) {
// cmd := exec.CommandContext(ctx,
// "customCLICommand",
// "-a", "argument",
// )
// cmd.Env = []string{
// "ENV_VAR_FOO=value",
// "ENV_VAR_BAR=other_value",
// }
//
// return cmd, nil
// },
// )
// return cmd, nil
// },
// )
//
// // Create credentials using your exec.Cmd and custom timeout
// provider := processcreds.NewProviderCommand(cmdBuilder,
// func(opt *processcreds.Provider) {
// // optionally override the provider's default timeout
// opt.Timeout = 1 * time.Second
// })
// // Create credentials using your exec.Cmd and custom timeout
// provider := processcreds.NewProviderCommand(cmdBuilder,
// func(opt *processcreds.Provider) {
// // optionally override the provider's default timeout
// opt.Timeout = 1 * time.Second
// })
package processcreds

View File

@ -149,12 +149,24 @@ func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *P
return p
}
type credentialProcessResponse struct {
Version int
AccessKeyID string `json:"AccessKeyId"`
// A CredentialProcessResponse is the AWS credentials format that must be
// returned when executing an external credential_process.
type CredentialProcessResponse struct {
// As of this writing, the Version key must be set to 1. This might
// increment over time as the structure evolves.
Version int
// The access key ID that identifies the temporary security credentials.
AccessKeyID string `json:"AccessKeyId"`
// The secret access key that can be used to sign requests.
SecretAccessKey string
SessionToken string
Expiration *time.Time
// The token that users must pass to the service API to use the temporary credentials.
SessionToken string
// The date on which the current credentials expire.
Expiration *time.Time
}
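// An illustrative sketch of the JSON an external credential_process command
// could print to populate this structure; all values are placeholders.
//
// {
//     "Version": 1,
//     "AccessKeyId": "ASIA...",
//     "SecretAccessKey": "secret",
//     "SessionToken": "token",
//     "Expiration": "2023-06-08T00:00:00Z"
// }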
// Retrieve executes the credential process command and returns the
@ -166,7 +178,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
}
// Serialize and validate response
resp := &credentialProcessResponse{}
resp := &CredentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return aws.Credentials{Source: ProviderName}, &ProviderError{
Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),

View File

@ -1,63 +1,81 @@
// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
// Package ssocreds provides a credential provider for retrieving temporary AWS
// credentials using an SSO access token.
//
// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
// IMPORTANT: The provider in this package does not initiate or perform the AWS
// SSO login flow. The SDK provider expects that you have already performed the
// SSO login flow using AWS CLI using the "aws sso login" command, or by some
// other mechanism. The provider must find a valid non-expired access token for
// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not
// found, it is expired, or the file is malformed an error will be returned.
//
// Loading AWS SSO credentials with the AWS shared configuration file
// # Loading AWS SSO credentials with the AWS shared configuration file
//
// You can configure AWS SSO credentials from the AWS shared configuration file by
// providing the specifying the required keys in the profile:
// specifying the required keys in the profile and referencing an sso-session:
//
// sso_account_id
// sso_region
// sso_role_name
// sso_start_url
// sso_session
// sso_account_id
// sso_role_name
//
// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target
// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
// For example, the following defines a profile "devsso" and specifies the AWS
// SSO parameters that defines the target account, role, sign-on portal, and
// the region where the user portal is located. Note: all SSO arguments must be
// provided, or an error will be returned.
//
// [profile devsso]
// sso_start_url = https://my-sso-portal.awsapps.com/start
// sso_role_name = SSOReadOnlyRole
// sso_region = us-east-1
// sso_account_id = 123456789012
// [profile devsso]
// sso_session = dev-session
// sso_role_name = SSOReadOnlyRole
// sso_account_id = 123456789012
//
// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
// retrieve credentials. For example:
// [sso-session dev-session]
// sso_start_url = https://my-sso-portal.awsapps.com/start
// sso_region = us-east-1
// sso_registration_scopes = sso:account:access
//
// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
// if err != nil {
// return err
// }
// Using the config module, you can load the AWS SDK shared configuration, and
// specify that this profile be used to retrieve credentials. For example:
//
// Programmatically loading AWS SSO credentials directly
// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso"))
// if err != nil {
// return err
// }
//
// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
// # Programmatically loading AWS SSO credentials directly
//
// client := sso.NewFromConfig(cfg)
// You can programmatically construct the AWS SSO Provider in your application,
// and provide the necessary information to load and retrieve temporary
// credentials using an access token from ~/.aws/sso/cache.
//
// var provider aws.CredentialsProvider
// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start")
// ssoClient := sso.NewFromConfig(cfg)
// ssoOidcClient := ssooidc.NewFromConfig(cfg)
// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
// if err != nil {
// return err
// }
//
// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
// provider = aws.NewCredentialsCache(provider)
// var provider aws.CredentialsProvider
// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
// })
//
// credentials, err := provider.Retrieve(context.TODO())
// if err != nil {
// return err
// }
// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
// provider = aws.NewCredentialsCache(provider)
//
// It is important that you wrap the Provider with aws.CredentialsCache if you are programmatically constructing the
// provider directly. This prevents your application from accessing the cached access token and requesting new
// credentials, err := provider.Retrieve(context.TODO())
// if err != nil {
// return err
// }
//
// It is important that you wrap the Provider with aws.CredentialsCache if you
// are programmatically constructing the provider directly. This prevents your
// application from accessing the cached access token and requesting new
// credentials each time the credentials are used.
//
// Additional Resources
// # Additional Resources
//
// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
// Configuring the AWS CLI to use AWS Single Sign-On:
// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
//
// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
// AWS Single Sign-On User Guide:
// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssocreds

View File

@ -1,10 +0,0 @@
//go:build !windows
// +build !windows
package ssocreds
import "os"
func getHomeDirectory() string {
return os.Getenv("HOME")
}

View File

@ -1,7 +0,0 @@
package ssocreds
import "os"
func getHomeDirectory() string {
return os.Getenv("USERPROFILE")
}

View File

@ -1,184 +0,0 @@
package ssocreds
import (
"context"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/aws-sdk-go-v2/service/sso"
)
// ProviderName is the name of the provider used to specify the source of credentials.
const ProviderName = "SSOProvider"
var defaultCacheLocation func() string
func defaultCacheLocationImpl() string {
return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
}
func init() {
defaultCacheLocation = defaultCacheLocationImpl
}
// GetRoleCredentialsAPIClient is a API client that implements the GetRoleCredentials operation.
type GetRoleCredentialsAPIClient interface {
GetRoleCredentials(ctx context.Context, params *sso.GetRoleCredentialsInput, optFns ...func(*sso.Options)) (*sso.GetRoleCredentialsOutput, error)
}
// Options is the Provider options structure.
type Options struct {
// The Client which is configured for the AWS Region where the AWS SSO user portal is located.
Client GetRoleCredentialsAPIClient
// The AWS account that is assigned to the user.
AccountID string
// The role name that is assigned to the user.
RoleName string
// The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
StartURL string
}
// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
type Provider struct {
options Options
}
// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
// for the AWS Region where the AWS SSO user portal is located.
func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
options := Options{
Client: client,
AccountID: accountID,
RoleName: roleName,
StartURL: startURL,
}
for _, fn := range optFns {
fn(&options)
}
return &Provider{
options: options,
}
}
// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
// by exchanging the accessToken present in ~/.aws/sso/cache.
func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
tokenFile, err := loadTokenFile(p.options.StartURL)
if err != nil {
return aws.Credentials{}, err
}
output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
AccessToken: &tokenFile.AccessToken,
AccountId: &p.options.AccountID,
RoleName: &p.options.RoleName,
})
if err != nil {
return aws.Credentials{}, err
}
return aws.Credentials{
AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId),
SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
SessionToken: aws.ToString(output.RoleCredentials.SessionToken),
Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
CanExpire: true,
Source: ProviderName,
}, nil
}
func getCacheFileName(url string) (string, error) {
hash := sha1.New()
_, err := hash.Write([]byte(url))
if err != nil {
return "", err
}
return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil
}
type rfc3339 time.Time
func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
var value string
if err := json.Unmarshal(bytes, &value); err != nil {
return err
}
parse, err := time.Parse(time.RFC3339, value)
if err != nil {
return fmt.Errorf("expected RFC3339 timestamp: %w", err)
}
*r = rfc3339(parse)
return nil
}
type token struct {
AccessToken string `json:"accessToken"`
ExpiresAt rfc3339 `json:"expiresAt"`
Region string `json:"region,omitempty"`
StartURL string `json:"startUrl,omitempty"`
}
func (t token) Expired() bool {
return sdk.NowTime().Round(0).After(time.Time(t.ExpiresAt))
}
// InvalidTokenError is the error type that is returned if loaded token has expired or is otherwise invalid.
// To refresh the SSO session run aws sso login with the corresponding profile.
type InvalidTokenError struct {
Err error
}
func (i *InvalidTokenError) Unwrap() error {
return i.Err
}
func (i *InvalidTokenError) Error() string {
const msg = "the SSO session has expired or is invalid"
if i.Err == nil {
return msg
}
return msg + ": " + i.Err.Error()
}
func loadTokenFile(startURL string) (t token, err error) {
key, err := getCacheFileName(startURL)
if err != nil {
return token{}, &InvalidTokenError{Err: err}
}
fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key))
if err != nil {
return token{}, &InvalidTokenError{Err: err}
}
if err := json.Unmarshal(fileBytes, &t); err != nil {
return token{}, &InvalidTokenError{Err: err}
}
if len(t.AccessToken) == 0 {
return token{}, &InvalidTokenError{}
}
if t.Expired() {
return token{}, &InvalidTokenError{Err: fmt.Errorf("access token is expired")}
}
return t, nil
}

View File

@ -0,0 +1,233 @@
package ssocreds
import (
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
)
var osUserHomeDur = shareddefaults.UserHomeDir
// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
// an error if unable to derive the path. The key will be used to compute a SHA1
// value that is hex encoded.
//
// Derives the filepath using the Key as:
//
// ~/.aws/sso/cache/<sha1-hex-encoded-key>.json
func StandardCachedTokenFilepath(key string) (string, error) {
homeDir := osUserHomeDur()
if len(homeDir) == 0 {
return "", fmt.Errorf("unable to get USER's home directory for cached token")
}
hash := sha1.New()
if _, err := hash.Write([]byte(key)); err != nil {
return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
}
cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
}
type tokenKnownFields struct {
AccessToken string `json:"accessToken,omitempty"`
ExpiresAt *rfc3339 `json:"expiresAt,omitempty"`
RefreshToken string `json:"refreshToken,omitempty"`
ClientID string `json:"clientId,omitempty"`
ClientSecret string `json:"clientSecret,omitempty"`
}
type token struct {
tokenKnownFields
UnknownFields map[string]interface{} `json:"-"`
}
func (t token) MarshalJSON() ([]byte, error) {
fields := map[string]interface{}{}
setTokenFieldString(fields, "accessToken", t.AccessToken)
setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
setTokenFieldString(fields, "refreshToken", t.RefreshToken)
setTokenFieldString(fields, "clientId", t.ClientID)
setTokenFieldString(fields, "clientSecret", t.ClientSecret)
for k, v := range t.UnknownFields {
if _, ok := fields[k]; ok {
return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
}
fields[k] = v
}
return json.Marshal(fields)
}
func setTokenFieldString(fields map[string]interface{}, key, value string) {
if value == "" {
return
}
fields[key] = value
}
func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
if value == nil {
return
}
fields[key] = value
}
func (t *token) UnmarshalJSON(b []byte) error {
var fields map[string]interface{}
if err := json.Unmarshal(b, &fields); err != nil {
return nil
}
t.UnknownFields = map[string]interface{}{}
for k, v := range fields {
var err error
switch k {
case "accessToken":
err = getTokenFieldString(v, &t.AccessToken)
case "expiresAt":
err = getTokenFieldRFC3339(v, &t.ExpiresAt)
case "refreshToken":
err = getTokenFieldString(v, &t.RefreshToken)
case "clientId":
err = getTokenFieldString(v, &t.ClientID)
case "clientSecret":
err = getTokenFieldString(v, &t.ClientSecret)
default:
t.UnknownFields[k] = v
}
if err != nil {
return fmt.Errorf("field %q, %w", k, err)
}
}
return nil
}
func getTokenFieldString(v interface{}, value *string) error {
var ok bool
*value, ok = v.(string)
if !ok {
return fmt.Errorf("expect value to be string, got %T", v)
}
return nil
}
func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
var stringValue string
if err := getTokenFieldString(v, &stringValue); err != nil {
return err
}
timeValue, err := parseRFC3339(stringValue)
if err != nil {
return err
}
*value = &timeValue
return nil
}
func loadCachedToken(filename string) (token, error) {
fileBytes, err := ioutil.ReadFile(filename)
if err != nil {
return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err)
}
var t token
if err := json.Unmarshal(fileBytes, &t); err != nil {
return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err)
}
if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
return token{}, fmt.Errorf(
"cached SSO token must contain accessToken and expiresAt fields")
}
return t, nil
}
func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) {
tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
return err
}
if err := os.Rename(tmpFilename, filename); err != nil {
return fmt.Errorf("failed to replace old cached SSO token file, %w", err)
}
return nil
}
func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) {
var f *os.File
f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
if err != nil {
return fmt.Errorf("failed to create cached SSO token file %w", err)
}
defer func() {
closeErr := f.Close()
if err == nil && closeErr != nil {
err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr)
}
}()
encoder := json.NewEncoder(f)
if err = encoder.Encode(t); err != nil {
return fmt.Errorf("failed to serialize cached SSO token, %w", err)
}
return nil
}
type rfc3339 time.Time
func parseRFC3339(v string) (rfc3339, error) {
parsed, err := time.Parse(time.RFC3339, v)
if err != nil {
return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err)
}
return rfc3339(parsed), nil
}
func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
var value string
// Use JSON unmarshal to unescape the quoted value making use of JSON's
// unquoting rules.
if err = json.Unmarshal(bytes, &value); err != nil {
return err
}
*r, err = parseRFC3339(value)
return nil
}
func (r *rfc3339) MarshalJSON() ([]byte, error) {
value := time.Time(*r).Format(time.RFC3339)
// Use JSON marshal to quote the value making use of JSON's
// quoting rules.
return json.Marshal(value)
}
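// An illustrative sketch of the cached token JSON these helpers read and
// write; all values are placeholders, and keys other than the known fields
// are preserved as-is.
//
// {
//     "accessToken": "eyJ...",
//     "expiresAt": "2023-06-08T00:00:00Z",
//     "refreshToken": "refresh-token",
//     "clientId": "client-id",
//     "clientSecret": "client-secret"
// }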

View File

@ -0,0 +1,152 @@
package ssocreds
import (
"context"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/aws-sdk-go-v2/service/sso"
)
// ProviderName is the name of the provider used to specify the source of
// credentials.
const ProviderName = "SSOProvider"
// GetRoleCredentialsAPIClient is an API client that implements the
// GetRoleCredentials operation.
type GetRoleCredentialsAPIClient interface {
GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
*sso.GetRoleCredentialsOutput, error,
)
}
// Options is the Provider options structure.
type Options struct {
// The Client which is configured for the AWS Region where the AWS SSO user
// portal is located.
Client GetRoleCredentialsAPIClient
// The AWS account that is assigned to the user.
AccountID string
// The role name that is assigned to the user.
RoleName string
// The URL that points to the organization's AWS Single Sign-On (AWS SSO)
// user portal.
StartURL string
// The filepath the cached token will be retrieved from. If unset, the Provider
// will derive the filepath from the startURL:
//
// ~/.aws/sso/cache/<sha1-hex-encoded-startURL>.json
//
// If a custom cached token filepath is used, the Provider's startUrl
// parameter will be ignored.
CachedTokenFilepath string
// Used by the SSOCredentialProvider if a token configuration
// profile is used in the shared config
SSOTokenProvider *SSOTokenProvider
}
// Provider is an AWS credential provider that retrieves temporary AWS
// credentials by exchanging an SSO login token.
type Provider struct {
options Options
cachedTokenFilepath string
}
// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
// provided client is expected to be configured for the AWS Region where the
// AWS SSO user portal is located.
func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
options := Options{
Client: client,
AccountID: accountID,
RoleName: roleName,
StartURL: startURL,
}
for _, fn := range optFns {
fn(&options)
}
return &Provider{
options: options,
cachedTokenFilepath: options.CachedTokenFilepath,
}
}
// Retrieve retrieves temporary AWS credentials from the configured Amazon
// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
// in ~/.aws/sso/cache. However, if a token provider configuration exists
// in the shared config, then we ought to use the token provider rather than
// direct access on the cached token.
func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
var accessToken *string
if p.options.SSOTokenProvider != nil {
token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
if err != nil {
return aws.Credentials{}, err
}
accessToken = &token.Value
} else {
if p.cachedTokenFilepath == "" {
cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
if err != nil {
return aws.Credentials{}, &InvalidTokenError{Err: err}
}
p.cachedTokenFilepath = cachedTokenFilepath
}
tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
if err != nil {
return aws.Credentials{}, &InvalidTokenError{Err: err}
}
if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
return aws.Credentials{}, &InvalidTokenError{}
}
accessToken = &tokenFile.AccessToken
}
output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
AccessToken: accessToken,
AccountId: &p.options.AccountID,
RoleName: &p.options.RoleName,
})
if err != nil {
return aws.Credentials{}, err
}
return aws.Credentials{
AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId),
SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
SessionToken: aws.ToString(output.RoleCredentials.SessionToken),
CanExpire: true,
Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
Source: ProviderName,
}, nil
}
// InvalidTokenError is the error type that is returned if loaded token has
// expired or is otherwise invalid. To refresh the SSO session run AWS SSO
// login with the corresponding profile.
type InvalidTokenError struct {
Err error
}
func (i *InvalidTokenError) Unwrap() error {
return i.Err
}
func (i *InvalidTokenError) Error() string {
const msg = "the SSO session has expired or is invalid"
if i.Err == nil {
return msg
}
return msg + ": " + i.Err.Error()
}
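// A minimal sketch of constructing the Provider with an explicit cached token
// filepath; the account, role, start URL, and path are placeholders.
//
// ssoClient := sso.NewFromConfig(cfg)
// provider := ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole",
// 	"https://my-sso-portal.awsapps.com/start",
// 	func(o *ssocreds.Options) {
// 		o.CachedTokenFilepath = "/path/to/custom/cache.json"
// 	})
// credsCache := aws.NewCredentialsCache(provider)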

View File

@ -0,0 +1,147 @@
package ssocreds
import (
"context"
"fmt"
"os"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/aws-sdk-go-v2/service/ssooidc"
"github.com/aws/smithy-go/auth/bearer"
)
// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
// client for calling the CreateToken operation to refresh the SSO token.
type CreateTokenAPIClient interface {
CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
*ssooidc.CreateTokenOutput, error,
)
}
// SSOTokenProviderOptions provides the options for configuring the
// SSOTokenProvider.
type SSOTokenProviderOptions struct {
// Client that can be overridden
Client CreateTokenAPIClient
// The set of API Client options to be applied when invoking the
// CreateToken operation.
ClientOptions []func(*ssooidc.Options)
// The path to the file containing the cached SSO token that will be read.
// Initialized from NewSSOTokenProvider's cachedTokenFilepath parameter.
CachedTokenFilepath string
}
// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
// Bearer Authentication. The SSOTokenProvider can only be used to refresh
// already cached SSO Tokens. This utility cannot perform the initial SSO
// create token.
//
// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
// the smithy-go TokenCache, if the loaded external configuration is configured
// for an SSO session.
//
// The initial SSO create token should be performed with the AWS CLI before the
// Go application using the SSOTokenProvider will need to retrieve the SSO
// token. If the AWS CLI has not created the token cache file, this provider
// will return an error when attempting to retrieve the cached token.
//
// This provider will attempt to refresh the cached SSO token periodically if
// needed when RetrieveBearerToken is called.
//
// A utility such as the AWS CLI must be used to initially create the SSO
// session and cached token file.
// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
type SSOTokenProvider struct {
options SSOTokenProviderOptions
}
var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
// The cachedTokenFilepath file's content will be rewritten by the token
// provider when the token is refreshed.
//
// The client must be configured for the AWS region the SSO token was created for.
func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
options := SSOTokenProviderOptions{
Client: client,
CachedTokenFilepath: cachedTokenFilepath,
}
for _, fn := range optFns {
fn(&options)
}
provider := &SSOTokenProvider{
options: options,
}
return provider
}
// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
// the SSOTokenProvider was created with. If the token has expired
// RetrieveBearerToken will attempt to refresh it. If the token cannot be
// refreshed or is not present an error will be returned.
//
// A utility such as the AWS CLI must be used to initially create the SSO
// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) {
cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
if err != nil {
return bearer.Token{}, err
}
if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) {
cachedToken, err = p.refreshToken(ctx, cachedToken)
if err != nil {
return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err)
}
}
expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt))
return bearer.Token{
Value: cachedToken.AccessToken,
CanExpire: !expiresAt.IsZero(),
Expires: expiresAt,
}, nil
}
func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) {
if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" {
return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
}
createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{
ClientId: &cachedToken.ClientID,
ClientSecret: &cachedToken.ClientSecret,
RefreshToken: &cachedToken.RefreshToken,
GrantType: aws.String("refresh_token"),
}, p.options.ClientOptions...)
if err != nil {
return token{}, fmt.Errorf("unable to refresh SSO token, %w", err)
}
expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second)
cachedToken.AccessToken = aws.ToString(createResult.AccessToken)
cachedToken.ExpiresAt = (*rfc3339)(&expiresAt)
cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken)
fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
if err != nil {
return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err)
}
if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil {
return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err)
}
return cachedToken, nil
}
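// A minimal sketch of wrapping the SSOTokenProvider with smithy-go's
// auth/bearer token cache, as recommended above; the OIDC client, the session
// name, and the bearer.NewTokenCache constructor are assumptions of this
// sketch rather than part of this change.
//
// oidcClient := ssooidc.NewFromConfig(cfg)
// cachePath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
// if err != nil {
// 	return err
// }
// tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachePath)
// cached := bearer.NewTokenCache(tokenProvider)
// tok, err := cached.RetrieveBearerToken(context.TODO())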

View File

@ -8,31 +8,31 @@
// ensure synchronous usage of the AssumeRoleProvider if the value is shared
// between multiple Credentials or service clients.
//
// Assume Role
// # Assume Role
//
// To assume an IAM role using STS with the SDK you can create a new Credentials
// with the SDKs's stscreds package.
//
// // Initial credentials loaded from SDK's default credential chain. Such as
// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
// // Role. These credentials will be used to make the STS Assume Role API.
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
// // Initial credentials loaded from SDK's default credential chain. Such as
// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
// // Role. These credentials will be used to make the STS Assume Role API.
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
//
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN.
// stsSvc := sts.NewFromConfig(cfg)
// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN.
// stsSvc := sts.NewFromConfig(cfg)
// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
//
// cfg.Credentials = aws.NewCredentialsCache(creds)
// cfg.Credentials = aws.NewCredentialsCache(creds)
//
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
//
// Assume Role with custom MFA Token provider
// # Assume Role with custom MFA Token provider
//
// To assume an IAM role with a MFA token you can either specify a custom MFA
// token provider or use the SDK's built in StdinTokenProvider that will prompt
@ -43,29 +43,29 @@
// With a custom token provider, the provider is responsible for refreshing the
// token code when called.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
//
// staticTokenProvider := func() (string, error) {
// return someTokenCode, nil
// }
// staticTokenProvider := func() (string, error) {
// return someTokenCode, nil
// }
//
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN using the MFA token code provided.
// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
// o.SerialNumber = aws.String("myTokenSerialNumber")
// o.TokenProvider = staticTokenProvider
// })
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN using the MFA token code provided.
// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
// o.SerialNumber = aws.String("myTokenSerialNumber")
// o.TokenProvider = staticTokenProvider
// })
//
// cfg.Credentials = aws.NewCredentialsCache(creds)
// cfg.Credentials = aws.NewCredentialsCache(creds)
//
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
//
// Assume Role with MFA Token Provider
// # Assume Role with MFA Token Provider
//
// To assume an IAM role with MFA for longer running tasks where the credentials
// may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
@ -80,23 +80,23 @@
// have undesirable results as the StdinTokenProvider will not be synchronized. A
// single Credentials with an AssumeRoleProvider can be shared safely.
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
//
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN using the MFA token code provided.
// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
// o.SerialNumber = aws.String("myTokenSerialNumber")
// o.TokenProvider = stscreds.StdinTokenProvider
// })
// // Create the credentials from AssumeRoleProvider to assume the role
// // referenced by the "myRoleARN" ARN using the MFA token code provided.
// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
// o.SerialNumber = aws.String("myTokenSerialNumber")
// o.TokenProvider = stscreds.StdinTokenProvider
// })
//
// cfg.Credentials = aws.NewCredentialsCache(creds)
// cfg.Credentials = aws.NewCredentialsCache(creds)
//
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
// // Create service client value configured for credentials
// // from assumed role.
// svc := s3.NewFromConfig(cfg)
package stscreds
import (

View File

@ -2,16 +2,16 @@
//
// aws-sdk-go-v2 is the v2 of the AWS SDK for the Go programming language.
//
// Getting started
// # Getting started
//
// The best way to get started working with the SDK is to use `go get` to add the
// SDK and desired service clients to your Go dependencies explicitly.
//
// go get github.com/aws/aws-sdk-go-v2
// go get github.com/aws/aws-sdk-go-v2
// go get github.com/aws/aws-sdk-go-v2/config
// go get github.com/aws/aws-sdk-go-v2/service/dynamodb
//
// Hello AWS
// # Hello AWS
//
// This example shows how you can use the v2 SDK to make an API request using the
// SDK's Amazon DynamoDB client.

View File

@ -1,3 +1,100 @@
# v1.13.3 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.2 (2023-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.1 (2023-03-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.0 (2023-03-14)
* **Feature**: Add flag to disable IMDSv1 fallback
# v1.12.24 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.23 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.22 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.21 (2022-12-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.20 (2022-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.19 (2022-10-24)
* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.18 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.16 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.15 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.14 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.13 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.12 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.11 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.10 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.9 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.8 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.7 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions

View File

@ -106,8 +106,10 @@ func New(options Options, optFns ...func(*Options)) *Client {
// or adding custom middleware behavior.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
HTTPClient: cfg.HTTPClient,
APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
HTTPClient: cfg.HTTPClient,
ClientLogMode: cfg.ClientLogMode,
Logger: cfg.Logger,
}
if cfg.Retryer != nil {
@ -172,6 +174,16 @@ type Options struct {
// The logger writer interface to write logging messages to.
Logger logging.Logger
// Configure IMDSv1 fallback behavior. By default, the client will attempt
// to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
// the client will return any errors encountered from attempting to fetch a token
// instead of silently using the insecure data flow of IMDSv1.
//
// See [configuring IMDS] for more information.
//
// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
EnableFallback aws.Ternary
// provides the caching of API tokens used for operation calls. If unset,
// the API token will not be retrieved for the operation.
tokenProvider *tokenProvider
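// A minimal sketch of disabling the IMDSv1 fallback described above; the
// metadata path is illustrative.
//
// client := imds.New(imds.Options{
// 	EnableFallback: aws.FalseTernary,
// })
// out, err := client.GetMetadata(context.TODO(), &imds.GetMetadataInput{
// 	Path: "instance-id",
// })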

View File

@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.12.4"
const goModuleVersion = "1.13.3"

View File

@ -86,6 +86,21 @@ func addRequestMiddleware(stack *middleware.Stack,
return err
}
err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
LogRequest: options.ClientLogMode.IsRequest(),
LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(),
LogResponse: options.ClientLogMode.IsResponse(),
LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
if err != nil {
return err
}
err = addSetLoggerMiddleware(stack, options)
if err != nil {
return err
}
// Retry support
return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
Retryer: options.Retryer,
@ -93,6 +108,10 @@ func addRequestMiddleware(stack *middleware.Stack,
})
}
func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
type serializeRequest struct {
GetPath func(interface{}) (string, error)
Method string

View File

@ -4,12 +4,14 @@ import (
"context"
"errors"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"net/http"
"sync"
"sync/atomic"
"time"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@ -68,7 +70,7 @@ func (t *tokenProvider) HandleFinalize(
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
if !t.enabled() {
if t.fallbackEnabled() && !t.enabled() {
// short-circuits to insecure data flow if token provider is disabled.
return next.HandleFinalize(ctx, input)
}
@ -115,23 +117,15 @@ func (t *tokenProvider) HandleDeserialize(
}
if resp.StatusCode == http.StatusUnauthorized { // unauthorized
err = &retryableError{Err: err}
t.enable()
err = &retryableError{Err: err, isRetryable: true}
}
return out, metadata, err
}
type retryableError struct {
Err error
}
func (*retryableError) RetryableError() bool { return true }
func (e *retryableError) Error() string { return e.Err.Error() }
func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
if !t.enabled() {
if t.fallbackEnabled() && !t.enabled() {
return nil, &bypassTokenRetrievalError{
Err: fmt.Errorf("cannot get API token, provider disabled"),
}
@ -147,7 +141,7 @@ func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error)
tok, err = t.updateToken(ctx)
if err != nil {
return nil, fmt.Errorf("cannot get API token, %w", err)
return nil, err
}
return tok, nil
@ -167,17 +161,19 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
TokenTTL: t.tokenTTL,
})
if err != nil {
// change the disabled flag on token provider to true, when error is request timeout error.
var statusErr interface{ HTTPStatusCode() int }
if errors.As(err, &statusErr) {
switch statusErr.HTTPStatusCode() {
// Disable get token if failed because of 403, 404, or 405
// Disable future get token if failed because of 403, 404, or 405
case http.StatusForbidden,
http.StatusNotFound,
http.StatusMethodNotAllowed:
t.disable()
if t.fallbackEnabled() {
logger := middleware.GetLogger(ctx)
logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
t.disable()
}
// 400 errors are terminal, and need to be upstreamed
case http.StatusBadRequest:
@ -192,8 +188,17 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
atomic.StoreUint32(&t.disabled, 1)
}
// Token couldn't be retrieved, but bypass this, and allow the
// request to continue.
if !t.fallbackEnabled() {
// NOTE: getToken() is an implementation detail of some outer operation
// (e.g. GetMetadata). It has its own retries that have already been exhausted.
// Mark the underlying error as a terminal error.
err = &retryableError{Err: err, isRetryable: false}
return nil, err
}
// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
// and allow the request to proceed. Future requests _may_ re-attempt fetching a
// token if not disabled.
return nil, &bypassTokenRetrievalError{Err: err}
}
@ -206,21 +211,21 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
return tok, nil
}
type bypassTokenRetrievalError struct {
Err error
}
func (e *bypassTokenRetrievalError) Error() string {
return fmt.Sprintf("bypass token retrieval, %v", e.Err)
}
func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
// enabled returns whether the token provider is currently enabled.
func (t *tokenProvider) enabled() bool {
return atomic.LoadUint32(&t.disabled) == 0
}
// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
func (t *tokenProvider) fallbackEnabled() bool {
switch t.client.options.EnableFallback {
case aws.FalseTernary:
return false
default:
return true
}
}
// disable disables the token provider and it will no longer attempt to inject
// the token, nor request updates.
func (t *tokenProvider) disable() {
@ -235,3 +240,22 @@ func (t *tokenProvider) enable() {
t.tokenMux.Unlock()
atomic.StoreUint32(&t.disabled, 0)
}
type bypassTokenRetrievalError struct {
Err error
}
func (e *bypassTokenRetrievalError) Error() string {
return fmt.Sprintf("bypass token retrieval, %v", e.Err)
}
func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
type retryableError struct {
Err error
isRetryable bool
}
func (e *retryableError) RetryableError() bool { return e.isRetryable }
func (e *retryableError) Error() string { return e.Err.Error() }
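
The `isRetryable` flag above feeds the SDK retryer through the `RetryableError() bool` interface. A standalone sketch of that classification pattern (the names here are illustrative, not the SDK's own retryer):

```go
package main

import (
	"errors"
	"fmt"
)

// retryableError mirrors the pattern above: it wraps an error and reports
// whether the failed operation should be retried.
type retryableError struct {
	Err         error
	isRetryable bool
}

func (e *retryableError) Error() string        { return e.Err.Error() }
func (e *retryableError) Unwrap() error        { return e.Err }
func (e *retryableError) RetryableError() bool { return e.isRetryable }

// shouldRetry is how a retryer-style consumer classifies the error.
func shouldRetry(err error) bool {
	var r interface{ RetryableError() bool }
	if errors.As(err, &r) {
		return r.RetryableError()
	}
	return false
}

func main() {
	transient := &retryableError{Err: fmt.Errorf("401 unauthorized"), isRetryable: true}
	terminal := &retryableError{Err: fmt.Errorf("token fetch failed, fallback disabled"), isRetryable: false}

	fmt.Println(shouldRetry(transient)) // true
	fmt.Println(shouldRetry(terminal))  // false
}
```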

View File

@ -0,0 +1,385 @@
# v1.11.67 (2023-05-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.66 (2023-05-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.65 (2023-05-04)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.64 (2023-04-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.63 (2023-04-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.62 (2023-04-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.61 (2023-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.60 (2023-03-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.59 (2023-03-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.58 (2023-03-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.57 (2023-03-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.56 (2023-03-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.55 (2023-02-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.54 (2023-02-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.53 (2023-02-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.52 (2023-02-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.51 (2023-02-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.50 (2023-02-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.49 (2023-01-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.48 (2023-01-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.47 (2023-01-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.46 (2022-12-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.45 (2022-12-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.44 (2022-12-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.43 (2022-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.42 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.41 (2022-11-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.40 (2022-11-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.39 (2022-11-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.38 (2022-11-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.37 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.36 (2022-10-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.35 (2022-10-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.34 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.33 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.32 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.31 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.30 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.29 (2022-08-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.28 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.27 (2022-08-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.26 (2022-08-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.25 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.24 (2022-08-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.23 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.22 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.21 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.20 (2022-07-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.19 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.18 (2022-07-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.17 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.16 (2022-06-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.15 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.14 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.13 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.12 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.11 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.10 (2022-05-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.9 (2022-05-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.8 (2022-05-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.7 (2022-04-27)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.6 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.5 (2022-04-12)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.4 (2022-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.1 (2022-01-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.5 (2021-12-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.4 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.3 (2021-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.2 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.1 (2021-11-12)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.0 (2021-11-06)
* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.6.0 (2021-10-21)
* **Feature**: Updated to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.4 (2021-10-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.3 (2021-09-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.2 (2021-09-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.1 (2021-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.5.0 (2021-08-27)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.4.1 (2021-08-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.4.0 (2021-08-04)
* **Feature**: adds error handling for deferred close calls
* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.2 (2021-07-15)
* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.1 (2021-07-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.3.0 (2021-06-25)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.3 (2021-06-04)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.2 (2021-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.1 (2021-05-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.0 (2021-05-14)
* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
* **Dependency Update**: Updated to the latest SDK module versions

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,37 @@
package manager
import (
"context"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
// DeleteObjectsAPIClient is an S3 API client that can invoke the DeleteObjects operation.
type DeleteObjectsAPIClient interface {
DeleteObjects(context.Context, *s3.DeleteObjectsInput, ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
}
// DownloadAPIClient is an S3 API client that can invoke the GetObject operation.
type DownloadAPIClient interface {
GetObject(context.Context, *s3.GetObjectInput, ...func(*s3.Options)) (*s3.GetObjectOutput, error)
}
// HeadBucketAPIClient is an S3 API client that can invoke the HeadBucket operation.
type HeadBucketAPIClient interface {
HeadBucket(context.Context, *s3.HeadBucketInput, ...func(*s3.Options)) (*s3.HeadBucketOutput, error)
}
// ListObjectsV2APIClient is an S3 API client that can invoke the ListObjectsV2 operation.
type ListObjectsV2APIClient interface {
ListObjectsV2(context.Context, *s3.ListObjectsV2Input, ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
}
// UploadAPIClient is an S3 API client that can invoke PutObject, UploadPart, CreateMultipartUpload,
// CompleteMultipartUpload, and AbortMultipartUpload operations.
type UploadAPIClient interface {
PutObject(context.Context, *s3.PutObjectInput, ...func(*s3.Options)) (*s3.PutObjectOutput, error)
UploadPart(context.Context, *s3.UploadPartInput, ...func(*s3.Options)) (*s3.UploadPartOutput, error)
CreateMultipartUpload(context.Context, *s3.CreateMultipartUploadInput, ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
CompleteMultipartUpload(context.Context, *s3.CompleteMultipartUploadInput, ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
AbortMultipartUpload(context.Context, *s3.AbortMultipartUploadInput, ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
}
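
These narrow interfaces exist so callers (including the downloader and uploader themselves) can accept just the operations they need and mock them in tests, while the concrete `*s3.Client` satisfies all of them. A quick compile-time check of that, assuming the module paths shown in this diff:

```go
package main

import (
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// Compile-time assertions: the concrete S3 client implements every
// narrow interface defined by the manager package.
var (
	_ manager.DeleteObjectsAPIClient = (*s3.Client)(nil)
	_ manager.DownloadAPIClient      = (*s3.Client)(nil)
	_ manager.HeadBucketAPIClient    = (*s3.Client)(nil)
	_ manager.ListObjectsV2APIClient = (*s3.Client)(nil)
	_ manager.UploadAPIClient        = (*s3.Client)(nil)
)

func main() {}
```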

View File

@ -1,8 +1,8 @@
package s3manager
package manager
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go-v2/aws/arn"
)
func validateSupportedARNType(bucket string) error {

View File

@ -0,0 +1,139 @@
package manager
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
const bucketRegionHeader = "X-Amz-Bucket-Region"
// GetBucketRegion will attempt to get the region for a bucket using the
// client's configured region to determine which AWS partition to perform the query on.
//
// The request will not be signed, and will not use your AWS credentials.
//
// A BucketNotFound error will be returned if the bucket does not exist in the
// AWS partition the client region belongs to.
//
// For example, to get the region of a bucket that exists in "eu-central-1",
// you could provide a region hint of "us-west-2".
//
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// log.Println("error:", err)
// return
// }
//
// bucket := "my-bucket"
// region, err := manager.GetBucketRegion(ctx, s3.NewFromConfig(cfg), bucket)
// if err != nil {
// var bnf manager.BucketNotFound
// if errors.As(err, &bnf) {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
// }
// return
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// By default the request will be made to the Amazon S3 endpoint using the virtual-hosted-style addressing.
//
// bucketname.s3.us-west-2.amazonaws.com/
//
// To configure GetBucketRegion to make a request directly to the Amazon
// S3 FIPS endpoints when a FIPS region name is not available (e.g.
// fips-us-gov-west-1), set the EndpointResolver on the config or client the
// utility is called with.
//
// cfg, err := config.LoadDefaultConfig(context.TODO(),
// config.WithEndpointResolver(
// aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
// return aws.Endpoint{URL: "https://s3-fips.us-west-2.amazonaws.com"}, nil
// }),
// )
// if err != nil {
// panic(err)
// }
func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket string, optFns ...func(*s3.Options)) (string, error) {
var captureBucketRegion deserializeBucketRegion
clientOptionFns := make([]func(*s3.Options), len(optFns)+1)
clientOptionFns[0] = func(options *s3.Options) {
options.Credentials = aws.AnonymousCredentials{}
options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware)
}
copy(clientOptionFns[1:], optFns)
_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
Bucket: aws.String(bucket),
}, clientOptionFns...)
if len(captureBucketRegion.BucketRegion) == 0 && err != nil {
var httpStatusErr interface {
HTTPStatusCode() int
}
if !errors.As(err, &httpStatusErr) {
return "", err
}
if httpStatusErr.HTTPStatusCode() == http.StatusNotFound {
return "", &bucketNotFound{}
}
return "", err
}
return captureBucketRegion.BucketRegion, nil
}
type deserializeBucketRegion struct {
BucketRegion string
}
func (d *deserializeBucketRegion) RegisterMiddleware(stack *middleware.Stack) error {
return stack.Deserialize.Add(d, middleware.After)
}
func (d *deserializeBucketRegion) ID() string {
return "DeserializeBucketRegion"
}
func (d *deserializeBucketRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
if err != nil {
return out, metadata, err
}
resp, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse)
}
d.BucketRegion = resp.Header.Get(bucketRegionHeader)
return out, metadata, err
}
// BucketNotFound indicates the bucket was not found in the partition when calling GetBucketRegion.
type BucketNotFound interface {
error
isBucketNotFound()
}
type bucketNotFound struct{}
func (b *bucketNotFound) Error() string {
return "bucket not found"
}
func (b *bucketNotFound) isBucketNotFound() {}
var _ BucketNotFound = (*bucketNotFound)(nil)
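
A short usage sketch: the region hint can also be overridden per call through the variadic client options, since `GetBucketRegion` forwards them to the underlying `HeadBucket` call. The bucket name is a placeholder.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The region only selects which partition to query; the request itself
	// is anonymous and unsigned.
	region, err := manager.GetBucketRegion(context.TODO(), client, "my-bucket",
		func(o *s3.Options) { o.Region = "us-west-2" })
	if err != nil {
		var bnf manager.BucketNotFound
		if errors.As(err, &bnf) {
			log.Fatal("bucket not found in this partition")
		}
		log.Fatal(err)
	}
	fmt.Println("bucket region:", region)
}
```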

View File

@ -1,9 +1,7 @@
package s3manager
package manager
import (
"io"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// BufferedReadSeeker is a buffered io.ReadSeeker
@ -72,7 +70,7 @@ func (b *BufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
// ReadAt will read up to len(p) bytes at the given file offset.
// This will result in the buffer being cleared.
func (b *BufferedReadSeeker) ReadAt(p []byte, off int64) (int, error) {
_, err := b.Seek(off, sdkio.SeekStart)
_, err := b.Seek(off, io.SeekStart)
if err != nil {
return 0, err
}

View File

@ -1,7 +1,7 @@
//go:build !windows
// +build !windows
package s3manager
package manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return nil

View File

@ -1,4 +1,4 @@
package s3manager
package manager
func defaultUploadBufferProvider() ReadSeekerWriteToProvider {
return NewBufferedReadSeekerWriteToPool(1024 * 1024)

View File

@ -1,7 +1,7 @@
//go:build !windows
// +build !windows
package s3manager
package manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return nil

View File

@ -1,4 +1,4 @@
package s3manager
package manager
func defaultDownloadBufferProvider() WriterReadFromProvider {
return NewPooledBufferedWriterReadFromProvider(1024 * 1024)

View File

@ -0,0 +1,3 @@
// Package manager provides utilities to upload and download objects from
// S3 concurrently. Helpful when working with large objects.
package manager
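
To ground the package doc, a minimal end-to-end sketch using the v2 API this commit migrates to. Bucket, key, and file paths are placeholders, and error handling is kept to the bare minimum.

```go
package main

import (
	"context"
	"log"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Concurrent multipart upload from any io.Reader.
	uploader := manager.NewUploader(client)
	if _, err := uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("large-object"),
		Body:   strings.NewReader("example payload"),
	}); err != nil {
		log.Fatal(err)
	}

	// Concurrent ranged download into an io.WriterAt (an os.File works).
	f, err := os.Create("/tmp/large-object")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	downloader := manager.NewDownloader(client)
	n, err := downloader.Download(ctx, f, &s3.GetObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("large-object"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```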

View File

@ -1,6 +1,8 @@
package s3manager
package manager
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@ -8,15 +10,15 @@ import (
"strings"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go/logging"
)
const userAgentKey = "s3-transfer"
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 5
@ -25,6 +27,9 @@ const DefaultDownloadPartSize = 1024 * 1024 * 5
// when using Download().
const DefaultDownloadConcurrency = 5
// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
const DefaultPartBodyMaxRetries = 3
type errReadingBody struct {
err error
}
@ -48,6 +53,15 @@ type Downloader struct {
// PartSize is ignored if the Range input parameter is provided.
PartSize int64
// PartBodyMaxRetries is the number of retry attempts to make for failed part downloads.
PartBodyMaxRetries int
// Logger to send logging messages to
Logger logging.Logger
// Enable Logging of part download retry attempts
LogInterruptedDownloads bool
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
//
@ -57,11 +71,11 @@ type Downloader struct {
Concurrency int
// An S3 client to use when performing downloads.
S3 s3iface.S3API
S3 DownloadAPIClient
// List of request options that will be passed down to individual API
// List of client options that will be passed down to individual API
// operation requests made by the downloader.
RequestOptions []request.Option
ClientOptions []func(*s3.Options)
// Defines the buffer strategy used when downloading a part.
//
@ -72,10 +86,10 @@ type Downloader struct {
BufferProvider WriterReadFromProvider
}
// WithDownloaderRequestOptions appends to the Downloader's API request options.
func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
// WithDownloaderClientOptions appends to the Downloader's API client options.
func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
return func(d *Downloader) {
d.RequestOptions = append(d.RequestOptions, opts...)
d.ClientOptions = append(d.ClientOptions, opts...)
}
}
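
A hedged sketch of the new option surfaces on the Downloader: struct fields such as `PartBodyMaxRetries` set at construction time, and client options applied to every `GetObject` call via `WithDownloaderClientOptions` (which replaces the v1 `WithDownloaderRequestOptions`). The `UseAccelerate` setting assumes transfer acceleration is enabled on the bucket.

```go
package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// downloadWithRetries shows the two kinds of options the v2 Downloader takes:
// downloader configuration and per-operation S3 client options.
func downloadWithRetries(ctx context.Context, client *s3.Client, w io.WriterAt, bucket, key string) (int64, error) {
	d := manager.NewDownloader(client,
		func(d *manager.Downloader) {
			d.PartSize = 64 * 1024 * 1024 // 64 MB parts
			d.PartBodyMaxRetries = 5      // retries per part body, new in this port
			d.LogInterruptedDownloads = true
		},
		// Applied to every S3 API call made by this downloader.
		manager.WithDownloaderClientOptions(func(o *s3.Options) {
			o.UseAccelerate = true // assumption: acceleration is enabled on the bucket
		}),
	)

	return d.Download(ctx, w, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
}
```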
@ -87,26 +101,29 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
//
// Example:
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
// // Load AWS Config
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
// // Create an S3 client using the loaded configuration
// s3.NewFromConfig(cfg)
//
// // Create a downloader with the session and custom options
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// // Create a downloader passing it the S3 client
// downloader := manager.NewDownloader(s3.NewFromConfig(cfg))
//
// // Create a downloader with the client and custom downloader options
// downloader := manager.NewDownloader(client, func(d *manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
return newDownloader(s3.New(c), options...)
}
func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Downloader {
func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloader {
d := &Downloader{
S3: client,
PartSize: DefaultDownloadPartSize,
Concurrency: DefaultDownloadConcurrency,
BufferProvider: defaultDownloadBufferProvider(),
S3: c,
PartSize: DefaultDownloadPartSize,
PartBodyMaxRetries: DefaultPartBodyMaxRetries,
Concurrency: DefaultDownloadConcurrency,
BufferProvider: defaultDownloadBufferProvider(),
}
for _, option := range options {
option(d)
@ -115,58 +132,7 @@ func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Download
return d
}
// NewDownloaderWithClient creates a new Downloader instance to download
// objects from S3 in concurrent chunks. Pass in additional functional
// options to customize the downloader behavior. Requires an S3 service client
// to make S3 API calls.
//
// Example:
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // The S3 client the S3 Downloader will use
// s3Svc := s3.New(sess)
//
// // Create a downloader with the s3 client and default options
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
// // Create a downloader with the s3 client and custom options
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
return newDownloader(svc, options...)
}
type maxRetrier interface {
MaxRetries() int
}
// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is called from.
// Modifying the options will not impact the original Downloader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
//
// If the GetObjectInput's Range value is provided, the downloader will perform a
// single GetObject request for that object's range. This will cause the part size
// and concurrency configurations to be ignored.
func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
return d.DownloadWithContext(aws.BackgroundContext(), w, input, options...)
}
// DownloadWithContext downloads an object in S3 and writes the payload into w
// Download downloads an object in S3 and writes the payload into w
// using concurrent GET requests. The n int64 returned is the size of the object downloaded
// in bytes.
//
@ -179,11 +145,25 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is
// called from. Modifying the options will not impact the original Downloader
// instance. Use the WithDownloaderRequestOptions helper function to pass in request
// instance. Use the WithDownloaderClientOptions helper function to pass in client
// options that will be applied to all API operations made with this downloader.
//
// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
// downloads, or in memory []byte wrapper using aws.WriteAtBuffer.
// downloads, or an in-memory []byte wrapper using manager.WriteAtBuffer. If you download
// files into memory, pre-allocate the buffer to avoid additional allocations
// and GC runs.
//
// Example:
//
// // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput
// buf := make([]byte, int(headObject.ContentLength))
// // wrap with manager.WriteAtBuffer
// w := manager.NewWriteAtBuffer(buf)
// // download file into the memory
// numBytesDownloaded, err := downloader.Download(ctx, w, &s3.GetObjectInput{
// Bucket: aws.String(bucket),
// Key: aws.String(item),
// })
//
// Specifying a Downloader.Concurrency of 1 will cause the Downloader to
// download the parts from S3 sequentially.
@ -193,21 +173,29 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
// If the GetObjectInput's Range value is provided, the downloader will perform a
// single GetObject request for that object's range. This will cause the part size
// and concurrency configurations to be ignored.
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
if err := validateSupportedARNType(aws.StringValue(input.Bucket)); err != nil {
func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
if err := validateSupportedARNType(aws.ToString(input.Bucket)); err != nil {
return 0, err
}
impl := downloader{w: w, in: input, cfg: d, ctx: ctx}
// Copy ClientOptions
clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1)
clientOptions = append(clientOptions, func(o *s3.Options) {
o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
})
clientOptions = append(clientOptions, impl.cfg.ClientOptions...)
impl.cfg.ClientOptions = clientOptions
for _, option := range options {
option(&impl.cfg)
}
impl.cfg.RequestOptions = append(impl.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
if s, ok := d.S3.(maxRetrier); ok {
impl.partBodyMaxRetries = s.MaxRetries()
}
// Ensures we don't need nil checks later on
impl.cfg.Logger = logging.WithContext(ctx, impl.cfg.Logger)
impl.partBodyMaxRetries = d.PartBodyMaxRetries
impl.totalBytes = -1
if impl.cfg.Concurrency == 0 {
@ -221,70 +209,9 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s
return impl.download()
}
// DownloadWithIterator will download a batched amount of objects in S3 and writes them
// to the io.WriterAt specified in the iterator.
//
// Example:
//
// svc := s3manager.NewDownloader(session)
//
// fooFile, err := os.Open("/tmp/foo.file")
// if err != nil {
// return err
// }
//
// barFile, err := os.Open("/tmp/bar.file")
// if err != nil {
// return err
// }
//
// objects := []s3manager.BatchDownloadObject {
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("foo"),
// },
// Writer: fooFile,
// },
// {
// Object: &s3.GetObjectInput {
// Bucket: aws.String("bucket"),
// Key: aws.String("bar"),
// },
// Writer: barFile,
// },
// }
//
// iter := &s3manager.DownloadObjectsIterator{Objects: objects}
// if err := svc.DownloadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (d Downloader) DownloadWithIterator(ctx aws.Context, iter BatchDownloadIterator, opts ...func(*Downloader)) error {
var errs []Error
for iter.Next() {
object := iter.DownloadObject()
if _, err := d.DownloadWithContext(ctx, object.Writer, object.Object, opts...); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
errs = append(errs, newError(err, object.Object.Bucket, object.Object.Key))
}
}
if len(errs) > 0 {
return NewBatchError("BatchedDownloadIncomplete", "some objects have failed to download.", errs)
}
return nil
}
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
ctx aws.Context
ctx context.Context
cfg Downloader
in *s3.GetObjectInput
@ -307,7 +234,7 @@ func (d *downloader) download() (n int64, err error) {
// If range is specified fall back to single download of that range
// this enables the functionality of ranged gets with the downloader but
// at the cost of no multipart downloads.
if rng := aws.StringValue(d.in.Range); len(rng) > 0 {
if rng := aws.ToString(d.in.Range); len(rng) > 0 {
d.downloadRange(rng)
return d.written, d.err
}
@ -349,9 +276,13 @@ func (d *downloader) download() (n int64, err error) {
// keep grabbing chunks of data until the range of bytes specified in
// the request is out of range of the content. Once, this happens, a
// 416 should occur.
e, ok := d.err.(awserr.RequestFailure)
if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
d.err = nil
var responseError interface {
HTTPStatusCode() int
}
if errors.As(d.err, &responseError) {
if responseError.HTTPStatusCode() == http.StatusRequestedRangeNotSatisfiable {
d.err = nil
}
}
}
@ -419,16 +350,16 @@ func (d *downloader) downloadRange(rng string) {
// downloadChunk downloads the chunk from s3
func (d *downloader) downloadChunk(chunk dlchunk) error {
in := &s3.GetObjectInput{}
awsutil.Copy(in, d.in)
var params s3.GetObjectInput
awsutil.Copy(&params, d.in)
// Get the next byte range of data
in.Range = aws.String(chunk.ByteRange())
params.Range = aws.String(chunk.ByteRange())
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
n, err = d.tryDownloadChunk(in, &chunk)
n, err = d.tryDownloadChunk(&params, &chunk)
if err == nil {
break
}
@ -444,9 +375,10 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
}
chunk.cur = 0
logMessage(d.cfg.S3, aws.LogDebugWithRequestRetries,
fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
aws.StringValue(in.Key), err, retry))
d.cfg.Logger.Logf(logging.Debug,
"object part body download interrupted %s, err, %v, retrying attempt %d",
aws.ToString(params.Key), err, retry)
}
d.incrWritten(n)
@ -454,14 +386,14 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
return err
}
func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) {
cleanup := func() {}
if d.cfg.BufferProvider != nil {
w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
}
defer cleanup()
resp, err := d.cfg.S3.GetObjectWithContext(d.ctx, in, d.cfg.RequestOptions...)
resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...)
if err != nil {
return 0, err
}
@ -480,21 +412,6 @@ func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64
return n, nil
}
func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
s, ok := svc.(*s3.S3)
if !ok {
return
}
if s.Config.Logger == nil {
return
}
if s.Config.LogLevel.Matches(level) {
s.Config.Logger.Log(msg)
}
}
// getTotalBytes is a thread-safe getter for retrieving the total byte status.
func (d *downloader) getTotalBytes() int64 {
d.m.Lock()
@ -519,8 +436,8 @@ func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
if resp.ContentRange == nil {
// ContentRange is nil when the full file contents is provided, and
// is not chunked. Use ContentLength instead.
if resp.ContentLength != nil {
d.totalBytes = *resp.ContentLength
if resp.ContentLength > 0 {
d.totalBytes = resp.ContentLength
return
}
} else {

View File

@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.11.67"

View File

@ -1,14 +1,13 @@
package s3manager
package manager
import (
"context"
"fmt"
"sync"
"github.com/aws/aws-sdk-go/aws"
)
type byteSlicePool interface {
Get(aws.Context) (*[]byte, error)
Get(context.Context) (*[]byte, error)
Put(*[]byte)
ModifyCapacity(int)
SliceSize() int64
@ -40,7 +39,7 @@ func newMaxSlicePool(sliceSize int64) *maxSlicePool {
var errZeroCapacity = fmt.Errorf("get called on zero capacity pool")
func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) {
// check if context is canceled before attempting to get a slice
// this ensures priority is given to the cancel case first
select {

View File

@ -1,27 +1,24 @@
package aws
package manager
import (
"io"
"strings"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
)
// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
// streaming payload API operations.
//
// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API
// operation's input will prevent that operation being retried in the case of
// network errors, and cause operation requests to fail if the operation
// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API operation's
// input will prevent that operation being retried in the case of
// network errors, and cause operation requests to fail if the operation
// requires payload signing.
//
// Note: If using With S3 PutObject to stream an object upload The SDK's S3
// Upload manager (s3manager.Uploader) provides support for streaming with the
// ability to retry network errors.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
// Note: If using with S3 PutObject to stream an object upload, the SDK's S3
// Upload Manager (manager.Uploader) provides support for streaming
// with the ability to retry network errors.
func ReadSeekCloser(r io.Reader) *ReaderSeekerCloser {
return &ReaderSeekerCloser{r}
}
// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
@ -30,20 +27,67 @@ type ReaderSeekerCloser struct {
r io.Reader
}
// IsReaderSeekable returns if the underlying reader type can be seeked. A
// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
// type.
func IsReaderSeekable(r io.Reader) bool {
switch v := r.(type) {
case ReaderSeekerCloser:
return v.IsSeeker()
// seekerLen attempts to get the number of bytes remaining at the seeker's
// current position. Returns the number of bytes remaining or error.
func seekerLen(s io.Seeker) (int64, error) {
// Determine if the seeker is actually seekable. ReaderSeekerCloser
// hides the fact that an io.Reader might not actually be seekable.
switch v := s.(type) {
case *ReaderSeekerCloser:
return v.IsSeeker()
case io.ReadSeeker:
return true
default:
return false
return v.GetLen()
}
return computeSeekerLength(s)
}
// GetLen returns the length of the bytes remaining in the underlying reader.
// Checks first for Len(), then io.Seeker to determine the size of the
// underlying reader.
//
// Will return -1 if the length cannot be determined.
func (r *ReaderSeekerCloser) GetLen() (int64, error) {
if l, ok := r.HasLen(); ok {
return int64(l), nil
}
if s, ok := r.r.(io.Seeker); ok {
return computeSeekerLength(s)
}
return -1, nil
}
func computeSeekerLength(s io.Seeker) (int64, error) {
curOffset, err := s.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}
endOffset, err := s.Seek(0, io.SeekEnd)
if err != nil {
return 0, err
}
_, err = s.Seek(curOffset, io.SeekStart)
if err != nil {
return 0, err
}
return endOffset - curOffset, nil
}
// HasLen returns the length of the underlying reader if the value implements
// the Len() int method.
func (r *ReaderSeekerCloser) HasLen() (int, bool) {
type lenner interface {
Len() int
}
if lr, ok := r.r.(lenner); ok {
return lr.Len(), true
}
return 0, false
}
// Read reads from the reader up to size of p. The number of bytes read, and
@ -53,7 +97,7 @@ func IsReaderSeekable(r io.Reader) bool {
// returned.
//
// Performs the same functionality as io.Reader Read
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
func (r *ReaderSeekerCloser) Read(p []byte) (int, error) {
switch t := r.r.(type) {
case io.Reader:
return t.Read(p)
@ -67,7 +111,7 @@ func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
// and an error, if any.
//
// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
func (r *ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
switch t := r.r.(type) {
case io.Seeker:
return t.Seek(offset, whence)
@ -76,80 +120,15 @@ func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
}
// IsSeeker returns if the underlying reader is also a seeker.
func (r ReaderSeekerCloser) IsSeeker() bool {
func (r *ReaderSeekerCloser) IsSeeker() bool {
_, ok := r.r.(io.Seeker)
return ok
}
// HasLen returns the length of the underlying reader if the value implements
// the Len() int method.
func (r ReaderSeekerCloser) HasLen() (int, bool) {
type lenner interface {
Len() int
}
if lr, ok := r.r.(lenner); ok {
return lr.Len(), true
}
return 0, false
}
// GetLen returns the length of the bytes remaining in the underlying reader.
// Checks first for Len(), then io.Seeker to determine the size of the
// underlying reader.
//
// Will return -1 if the length cannot be determined.
func (r ReaderSeekerCloser) GetLen() (int64, error) {
if l, ok := r.HasLen(); ok {
return int64(l), nil
}
if s, ok := r.r.(io.Seeker); ok {
return seekerLen(s)
}
return -1, nil
}
// SeekerLen attempts to get the number of bytes remaining at the seeker's
// current position. Returns the number of bytes remaining or error.
func SeekerLen(s io.Seeker) (int64, error) {
// Determine if the seeker is actually seekable. ReaderSeekerCloser
// hides the fact that an io.Reader might not actually be seekable.
switch v := s.(type) {
case ReaderSeekerCloser:
return v.GetLen()
case *ReaderSeekerCloser:
return v.GetLen()
}
return seekerLen(s)
}
func seekerLen(s io.Seeker) (int64, error) {
curOffset, err := s.Seek(0, sdkio.SeekCurrent)
if err != nil {
return 0, err
}
endOffset, err := s.Seek(0, sdkio.SeekEnd)
if err != nil {
return 0, err
}
_, err = s.Seek(curOffset, sdkio.SeekStart)
if err != nil {
return 0, err
}
return endOffset - curOffset, nil
}
// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
func (r ReaderSeekerCloser) Close() error {
func (r *ReaderSeekerCloser) Close() error {
switch t := r.r.(type) {
case io.Closer:
return t.Close()
@ -206,59 +185,3 @@ func (b *WriteAtBuffer) Bytes() []byte {
defer b.m.Unlock()
return b.buf
}
// MultiCloser is a utility to close multiple io.Closers within a single
// statement.
type MultiCloser []io.Closer
// Close closes all of the io.Closers making up the MultiClosers. Any
// errors that occur while closing will be returned in the order they
// occur.
func (m MultiCloser) Close() error {
var errs errors
for _, c := range m {
err := c.Close()
if err != nil {
errs = append(errs, err)
}
}
if len(errs) != 0 {
return errs
}
return nil
}
type errors []error
func (es errors) Error() string {
var parts []string
for _, e := range es {
parts = append(parts, e.Error())
}
return strings.Join(parts, "\n")
}
// CopySeekableBody copies the seekable body to an io.Writer
func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
curPos, err := src.Seek(0, sdkio.SeekCurrent)
if err != nil {
return 0, err
}
// copy errors may be assumed to be from the body.
n, err := io.Copy(dst, src)
if err != nil {
return n, err
}
// seek back to the first position after reading to reset
// the body for transmission.
_, err = src.Seek(curPos, sdkio.SeekStart)
if err != nil {
return n, err
}
return n, nil
}
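
A tiny sketch of the length probing described above (a `Len()` check first, then seek-based measurement), using the wrapper now exported from the manager package. `strings.Reader` is used only because it conveniently implements both `Len()` and `io.Seeker`.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
)

func main() {
	// strings.Reader implements Len(), so HasLen answers without seeking.
	r := manager.ReadSeekCloser(strings.NewReader("hello world"))

	if n, ok := r.HasLen(); ok {
		fmt.Println("bytes via Len():", n) // 11
	}

	// GetLen falls back to seek-based measurement for plain io.Seekers.
	n, err := r.GetLen()
	if err != nil {
		panic(err)
	}
	fmt.Println("bytes via GetLen():", n) // 11
}
```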

View File

@ -1,25 +1,25 @@
package s3manager
package manager
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"sort"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000
const MaxUploadParts int32 = 10000
// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
@ -40,58 +40,55 @@ const DefaultUploadConcurrency = 5
//
// Example:
//
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// u := manager.NewUploader(client)
// output, err := u.upload(context.Background(), input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
// // Process error generically
// fmt.Println("Error:", err.Error())
// }
// var multierr manager.MultiUploadFailure
// if errors.As(err, &multierr) {
// fmt.Printf("upload failure UploadID=%s, %s\n", multierr.UploadID(), multierr.Error())
// } else {
// fmt.Printf("upload failure, %s\n", err.Error())
// }
// }
type MultiUploadFailure interface {
awserr.Error
error
// Returns the upload id for the S3 multipart upload that failed.
// UploadID returns the upload id for the S3 multipart upload that failed.
UploadID() string
}
// So that the Error interface type can be included as an anonymous field
// in the multiUploadError struct and not conflict with the error.Error() method.
type awsError awserr.Error
// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
// Composed of the original error and the upload ID of the failed upload.
//
// Should be used for an error that occurred failing an S3 multipart upload,
// and an upload ID is available. If an upload ID is not available, a more
// relevant error should be used.
type multiUploadError struct {
awsError
err error
// ID for multipart upload which failed.
uploadID string
}
// Error returns the string representation of the error.
// batchItemError returns the string representation of the error.
//
// # See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
extra := fmt.Sprintf("upload id: %s", m.uploadID)
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
func (m *multiUploadError) Error() string {
var extra string
if m.err != nil {
extra = fmt.Sprintf(", cause: %s", m.err.Error())
}
return fmt.Sprintf("upload multipart failed, upload id: %s%s", m.uploadID, extra)
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
return m.Error()
// Unwrap returns the underlying error that caused the upload failure.
func (m *multiUploadError) Unwrap() error {
return m.err
}
// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
func (m *multiUploadError) UploadID() string {
return m.uploadID
}
@ -100,23 +97,68 @@ type UploadOutput struct {
// The URL where the object was uploaded to.
Location string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
// Will be empty string if multipart upload was not used, and the object
// was uploaded as a single PutObject call.
UploadID string
// The list of parts that were uploaded and their checksums. Will be empty
// if multipart upload was not used, and the object was uploaded as a
// single PutObject call.
CompletedParts []types.CompletedPart
// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
// encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled bool
// The base64-encoded, 32-bit CRC32 checksum of the object.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object.
ChecksumSHA256 *string
// Entity tag for the uploaded object.
ETag *string
// If the object expiration is configured, this will contain the expiration date
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
Expiration *string
// The object key of the newly created object.
Key *string
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged types.RequestCharged
// If present, specifies the ID of the Amazon Web Services Key Management Service
// (Amazon Web Services KMS) symmetric customer managed customer master key (CMK)
// that was used for the object.
SSEKMSKeyId *string
// If you specified server-side encryption either with an Amazon S3-managed
// encryption key or an Amazon Web Services KMS customer master key (CMK) in your
// initiate multipart upload request, the response includes this header. It
// confirms the encryption algorithm that Amazon S3 used to encrypt the object.
ServerSideEncryption types.ServerSideEncryption
// The version of the object that was uploaded. Will only be populated if
// the S3 Bucket is versioned. If the bucket is not versioned this field
// will not be set.
VersionID *string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
UploadID string
// Entity tag of the object.
ETag *string
}
// WithUploaderRequestOptions appends to the Uploader's API request options.
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
// WithUploaderRequestOptions appends to the Uploader's API client options.
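//
// For illustration, a hedged sketch (client is an assumed, existing *s3.Client):
//
//	uploader := manager.NewUploader(client,
//		manager.WithUploaderRequestOptions(func(o *s3.Options) {
//			o.UsePathStyle = true // assumption: a path-style endpoint is required
//		}),
//	)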
func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) {
return func(u *Uploader) {
u.RequestOptions = append(u.RequestOptions, opts...)
u.ClientOptions = append(u.ClientOptions, opts...)
}
}
@ -124,13 +166,34 @@ func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
//
// The ContentMD5 member for pre-computed MD5 checksums will be ignored for
// multipart uploads. For objects uploaded in a single part, the ContentMD5
// will be used.
// # Pre-computed Checksums
//
// The Checksum members for pre-computed checksums will be ignored for
// multipart uploads. Objects uploaded in a single part will include the
// checksum member in the request.
//
// Care must be taken when using pre-computed checksums with the transfer
// upload manager. The format and value of the checksum differ based on
// whether the upload is performed as a single or multipart upload.
//
// Uploads that are smaller than the Uploader's PartSize will be uploaded using
// the PutObject API operation. Pre-computed checksum of the uploaded object's
// content are valid for these single part uploads. If the checksum provided
// does not match the uploaded content the upload will fail.
//
// Uploads that are larger than the Uploader's PartSize will be uploaded using
// multi-part upload. The pre-computed checksum for these uploads is a checksum
// of the checksums of each part, not a checksum of the full uploaded bytes, and
// has the format "<checksum of checksums>-<numberParts>" (e.g. "DUoRhQ==-3").
// If a pre-computed checksum is provided that does not match this format and
// the uploaded content, the upload will fail.
//
// ContentMD5 is explicitly ignored for multipart uploads, and its value is
// suppressed.
//
// # Automatically Computed Checksums
//
// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput
// is set to a valid value, the SDK will automatically compute the checksum of
// the individual uploaded parts. The UploadOutput result from Upload will
// include the checksum of part checksums provided by the S3
// CompleteMultipartUpload API call.
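//
// For illustration, a hedged sketch (client and file are assumed to exist):
//
//	uploader := manager.NewUploader(client)
//	out, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
//		Bucket:            aws.String("amzn-s3-demo-bucket"), // assumed bucket
//		Key:               aws.String("example-key"),
//		Body:              file,
//		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
//	})
//	// For a multipart upload, out.ChecksumSHA256 holds the checksum of part
//	// checksums reported by CompleteMultipartUpload.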
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
@ -164,14 +227,14 @@ type Uploader struct {
// error must be used to signal end of stream.
//
// Defaults to package const's MaxUploadParts value.
MaxUploadParts int
MaxUploadParts int32
// The client to use when uploading to S3.
S3 s3iface.S3API
S3 UploadAPIClient
// List of request options that will be passed down to individual API
// operation requests made by the uploader.
RequestOptions []request.Option
ClientOptions []func(*s3.Options)
// Defines the buffer strategy used when uploading a part
BufferProvider ReadSeekerWriteToProvider
@ -187,21 +250,23 @@ type Uploader struct {
//
// Example:
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
// // Load AWS Config
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
// }
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
// // Create an S3 Client with the config
// client := s3.NewFromConfig(cfg)
//
// // Create an uploader with the session and custom options
// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// // Create an uploader passing it the client
// uploader := manager.NewUploader(client)
//
// // Create an uploader with the client and custom options
// uploader := manager.NewUploader(client, func(u *manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
return newUploader(s3.New(c), options...)
}
func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: client,
PartSize: DefaultUploadPartSize,
@ -220,72 +285,10 @@ func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
return u
}
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires
// a S3 service client to make S3 API calls.
//
// Example:
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(sess)
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
//
// // Create an uploader with S3 client and custom options
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
return newUploader(svc, options...)
}
// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
// can configure the buffer size and concurrency through the Uploader's parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// Use the WithUploaderRequestOptions helper function to pass in request
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
//
// Example:
//
// // Upload input parameters
// upParams := &s3manager.UploadInput{
// Bucket: &bucketName,
// Key: &keyName,
// Body: file,
// }
//
// // Perform an upload.
// result, err := uploader.Upload(upParams)
//
// // Perform upload with options different than the those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
}
// UploadWithContext uploads an object to S3, intelligently buffering large
// Upload uploads an object to S3, intelligently buffering large
// files into smaller chunks and sending them in parallel across multiple
// goroutines. You can configure the buffer size and concurrency through the
// Uploader's parameters.
//
// UploadWithContext is the same as Upload with the additional support for
// Context input parameters. The Context must not be nil. A nil Context will
// cause a panic. Use the context to add deadlining, timeouts, etc. The
// UploadWithContext may create sub-contexts for individual underlying requests.
// Uploader parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
@ -295,80 +298,34 @@ func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*Uploa
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
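//
// For illustration, a hedged sketch (client and file are assumed to exist):
//
//	uploader := manager.NewUploader(client)
//	result, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
//		Bucket: aws.String("amzn-s3-demo-bucket"), // assumed bucket
//		Key:    aws.String("example-key"),
//		Body:   file,
//	}, func(u *manager.Uploader) {
//		u.PartSize = 10 * 1024 * 1024 // 10MB parts; only this call's copy is changed
//	})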
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (
*UploadOutput, error,
) {
i := uploader{in: input, cfg: u, ctx: ctx}
// Copy ClientOptions
clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1)
clientOptions = append(clientOptions, func(o *s3.Options) {
o.APIOptions = append(o.APIOptions,
middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
)
})
clientOptions = append(clientOptions, i.cfg.ClientOptions...)
i.cfg.ClientOptions = clientOptions
for _, opt := range opts {
opt(&i.cfg)
}
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
return i.upload()
}
// UploadWithIterator will upload a batched amount of objects to S3. This operation uses
// the iterator pattern to know which object to upload next. Since this is an interface this
// allows for custom defined functionality.
//
// Example:
//
// svc:= s3manager.NewUploader(sess)
//
// objects := []BatchUploadObject{
// {
// Object: &s3manager.UploadInput {
// Key: aws.String("key"),
// Bucket: aws.String("bucket"),
// },
// },
// }
//
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
// return err
// }
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
var errs []Error
for iter.Next() {
object := iter.UploadObject()
if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
if object.After == nil {
continue
}
if err := object.After(); err != nil {
s3Err := Error{
OrigErr: err,
Bucket: object.Object.Bucket,
Key: object.Object.Key,
}
errs = append(errs, s3Err)
}
}
if len(errs) > 0 {
return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
}
return nil
}
// internal structure to manage an upload to S3.
type uploader struct {
ctx aws.Context
ctx context.Context
cfg Uploader
in *UploadInput
in *s3.PutObjectInput
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
@ -378,13 +335,12 @@ type uploader struct {
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
if err := u.init(); err != nil {
return nil, awserr.New("ReadRequestBody", "unable to initialize upload", err)
return nil, fmt.Errorf("unable to initialize upload: %w", err)
}
defer u.cfg.partPool.Close()
if u.cfg.PartSize < MinUploadPartSize {
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
return nil, awserr.New("ConfigError", msg, nil)
return nil, fmt.Errorf("part size must be at least %d bytes", MinUploadPartSize)
}
// Do one read to determine if we have more than one part
@ -393,7 +349,7 @@ func (u *uploader) upload() (*UploadOutput, error) {
return u.singlePart(reader, cleanup)
} else if err != nil {
cleanup()
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
return nil, fmt.Errorf("read upload data failed: %w", err)
}
mu := multiuploader{uploader: u}
@ -402,7 +358,7 @@ func (u *uploader) upload() (*UploadOutput, error) {
// init will initialize all default options.
func (u *uploader) init() error {
if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil {
if err := validateSupportedARNType(aws.ToString(u.in.Bucket)); err != nil {
return err
}
@ -442,7 +398,7 @@ func (u *uploader) initSize() error {
switch r := u.in.Body.(type) {
case io.Seeker:
n, err := aws.SeekerLen(r)
n, err := seekerLen(r)
if err != nil {
return err
}
@ -528,27 +484,69 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
defer cleanup()
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
var params s3.PutObjectInput
awsutil.Copy(&params, u.in)
params.Body = r
// Need to use request form because URL generated in request is
// used in return.
req, out := u.cfg.S3.PutObjectRequest(params)
req.SetContext(u.ctx)
req.ApplyOptions(u.cfg.RequestOptions...)
if err := req.Send(); err != nil {
var locationRecorder recordLocationClient
out, err := u.cfg.S3.PutObject(u.ctx, &params,
append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
if err != nil {
return nil, err
}
url := req.HTTPRequest.URL.String()
return &UploadOutput{
Location: url,
VersionID: out.VersionId,
ETag: out.ETag,
Location: locationRecorder.location,
BucketKeyEnabled: out.BucketKeyEnabled,
ChecksumCRC32: out.ChecksumCRC32,
ChecksumCRC32C: out.ChecksumCRC32C,
ChecksumSHA1: out.ChecksumSHA1,
ChecksumSHA256: out.ChecksumSHA256,
ETag: out.ETag,
Expiration: out.Expiration,
Key: params.Key,
RequestCharged: out.RequestCharged,
SSEKMSKeyId: out.SSEKMSKeyId,
ServerSideEncryption: out.ServerSideEncryption,
VersionID: out.VersionId,
}, nil
}
type httpClient interface {
Do(r *http.Request) (*http.Response, error)
}
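// recordLocationClient wraps the S3 client's configured HTTP client so the URL
// of the final request (with its query string stripped) can be captured and
// surfaced as UploadOutput.Location, replacing the presigned GetObject URL the
// v1 SDK generated for the same purpose.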
type recordLocationClient struct {
httpClient
location string
}
func (c *recordLocationClient) WrapClient() func(o *s3.Options) {
return func(o *s3.Options) {
c.httpClient = o.HTTPClient
o.HTTPClient = c
}
}
func (c *recordLocationClient) Do(r *http.Request) (resp *http.Response, err error) {
resp, err = c.httpClient.Do(r)
if err != nil {
return resp, err
}
if resp.Request != nil && resp.Request.URL != nil {
url := *resp.Request.URL
url.RawQuery = ""
c.location = url.String()
}
return resp, err
}
// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
*uploader
@ -562,26 +560,28 @@ type multiuploader struct {
// keeps track of a single chunk of data being sent to S3.
type chunk struct {
buf io.ReadSeeker
num int64
num int32
cleanup func()
}
// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart
type completedParts []types.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
var params s3.CreateMultipartUploadInput
awsutil.Copy(&params, u.in)
// Create the multipart
resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
var locationRecorder recordLocationClient
resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, &params,
append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
if err != nil {
cleanup()
return nil, err
@ -596,7 +596,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
}
// Send part 1 to the workers
var num int64 = 1
var num int32 = 1
ch <- chunk{buf: firstBuf, num: num, cleanup: cleanup}
// Read and queue the rest of the parts
@ -625,39 +625,38 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
// Close the channel, wait for workers, and complete upload
close(ch)
u.wg.Wait()
complete := u.complete()
completeOut := u.complete()
if err := u.geterr(); err != nil {
return nil, &multiUploadError{
awsError: awserr.New(
"MultipartUpload",
"upload multipart failed",
err),
err: err,
uploadID: u.uploadID,
}
}
// Create a presigned URL of the S3 Get Object in order to have parity with
// single part upload.
getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
})
getReq.Config.Credentials = credentials.AnonymousCredentials
getReq.SetContext(u.ctx)
uploadLocation, _, _ := getReq.PresignRequest(1)
return &UploadOutput{
Location: uploadLocation,
VersionID: complete.VersionId,
UploadID: u.uploadID,
ETag: complete.ETag,
Location: locationRecorder.location,
UploadID: u.uploadID,
CompletedParts: u.parts,
BucketKeyEnabled: completeOut.BucketKeyEnabled,
ChecksumCRC32: completeOut.ChecksumCRC32,
ChecksumCRC32C: completeOut.ChecksumCRC32C,
ChecksumSHA1: completeOut.ChecksumSHA1,
ChecksumSHA256: completeOut.ChecksumSHA256,
ETag: completeOut.ETag,
Expiration: completeOut.Expiration,
Key: completeOut.Key,
RequestCharged: completeOut.RequestCharged,
SSEKMSKeyId: completeOut.SSEKMSKeyId,
ServerSideEncryption: completeOut.ServerSideEncryption,
VersionID: completeOut.VersionId,
}, nil
}
func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error) (bool, error) {
func (u *multiuploader) shouldContinue(part int32, nextChunkLen int, err error) (bool, error) {
if err != nil && err != io.EOF {
return false, awserr.New("ReadRequestBody", "read multipart upload data failed", err)
return false, fmt.Errorf("read multipart upload data failed, %w", err)
}
if nextChunkLen == 0 {
@ -669,16 +668,16 @@ func (u *multiuploader) shouldContinue(part int64, nextChunkLen int, err error)
part++
// This upload exceeded maximum number of supported parts, error now.
if part > int64(u.cfg.MaxUploadParts) || part > int64(MaxUploadParts) {
if part > u.cfg.MaxUploadParts || part > MaxUploadParts {
var msg string
if part > int64(u.cfg.MaxUploadParts) {
if part > u.cfg.MaxUploadParts {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.cfg.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
return false, awserr.New("TotalPartsExceeded", msg, nil)
return false, fmt.Errorf(msg)
}
return true, err
@ -712,19 +711,30 @@ func (u *multiuploader) send(c chunk) error {
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
UploadId: &u.uploadID,
SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
SSECustomerKey: u.in.SSECustomerKey,
PartNumber: &c.num,
}
SSECustomerKeyMD5: u.in.SSECustomerKeyMD5,
ExpectedBucketOwner: u.in.ExpectedBucketOwner,
RequestPayer: u.in.RequestPayer,
resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
ChecksumAlgorithm: u.in.ChecksumAlgorithm,
// Invalid to set any of the individual ChecksumXXX members from
// PutObject as they are never valid for individual parts of a
// multipart upload.
PartNumber: c.num,
UploadId: &u.uploadID,
}
// TODO should do copy then clear?
resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...)
if err != nil {
return err
}
n := c.num
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
var completed types.CompletedPart
awsutil.Copy(&completed, resp)
completed.PartNumber = c.num
u.m.Lock()
u.parts = append(u.parts, completed)
@ -760,9 +770,11 @@ func (u *multiuploader) fail() {
Key: u.in.Key,
UploadId: &u.uploadID,
}
_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
_, err := u.cfg.S3.AbortMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
if err != nil {
logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
// TODO: Add logging
//logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
_ = err
}
}
@ -776,13 +788,12 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
params := &s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
}
resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
var params s3.CompleteMultipartUploadInput
awsutil.Copy(&params, u.in)
params.UploadId = &u.uploadID
params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts}
resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, &params, u.cfg.ClientOptions...)
if err != nil {
u.seterr(err)
u.fail()


@ -1,11 +1,11 @@
package s3manager
package manager
import (
"bufio"
"io"
"sync"
"github.com/aws/aws-sdk-go/internal/sdkio"
"github.com/aws/aws-sdk-go-v2/internal/sdkio"
)
// WriterReadFrom defines an interface implementing io.Writer and io.ReaderFrom


@ -51,7 +51,11 @@ func rcopy(dst, src reflect.Value, root bool) {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
if _, ok := src.Interface().(*time.Time); !ok {
dst.Set(reflect.New(e))
if dst.Kind() == reflect.String {
dst.SetString(e.String())
} else {
dst.Set(reflect.New(e))
}
} else {
tempValue := reflect.New(e)
tempValue.Elem().Set(src.Elem())
@ -59,7 +63,7 @@ func rcopy(dst, src reflect.Value, root bool) {
dst.Set(tempValue)
}
}
if src.Elem().IsValid() {
if dst.Kind() != reflect.String && src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}


@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool {
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil, and of the same type they are equal
// If the elements are both nil, and of the same type the are equal
// If they are of different types they are not equal
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
@ -23,5 +23,11 @@ func DeepEqual(a, b interface{}) bool {
return false
}
// Special casing for strings as typed enumerations are string aliases
// but are not deep equal.
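//
// For example (a sketch): given `type storageClass string`,
// DeepEqual(storageClass("STANDARD"), "STANDARD") reports true here, while
// reflect.DeepEqual would report false because the concrete types differ.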
if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
return ra.String() == rb.String()
}
return reflect.DeepEqual(ra.Interface(), rb.Interface())
}


@ -185,12 +185,13 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
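//
// For illustration, a hedged sketch:
//
//	type widget struct{ Color string }
//	w := widget{}
//	SetValueAtPath(&w, "color", "red") // matches the Color field case-insensitively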
func SetValueAtPath(i interface{}, path string, v interface{}) {
rvals := rValuesAtPath(i, path, true, false, v == nil)
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
setValue(rval, v)
}
}
@ -215,7 +216,10 @@ func setValue(dstVal reflect.Value, src interface{}) {
}
dstVal.Set(srcVal)
} else {
dstVal.Set(srcVal)
if dstVal.Kind() == reflect.String {
dstVal.SetString(srcVal.String())
} else {
dstVal.Set(srcVal)
}
}
}
