Initial commit for Azure Blob Storage Support

parent cca8814bed
commit 3e6a24b152
Gopkg.lock:

@@ -17,6 +17,14 @@
   revision = "aad3f485ee528456e0768f20397b4d9dd941e755"
   version = "v0.25.0"
 
+[[projects]]
+  digest = "1:602649ff074ccee9273e1d3b25c4069f13a70fa0c232957c7d68a6f02fb7a9ea"
+  name = "github.com/Azure/azure-pipeline-go"
+  packages = ["pipeline"]
+  pruneopts = "NUT"
+  revision = "232aee85e8e3a6223a11c0943f7df2ae0fac00e4"
+  version = "v0.2.2"
+
 [[projects]]
   digest = "1:f56288257633effcb1743f6fa77d12dd3eaeeb5c78b3de6e199eb1b4780f57f5"
   name = "github.com/Azure/azure-sdk-for-go"
@@ -28,6 +36,14 @@
   revision = "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a"
   version = "v19.1.0"
 
+[[projects]]
+  digest = "1:0a1307390445735712b3b914b447cc7ad1974795e50470b7e092cfaa6063f6dd"
+  name = "github.com/Azure/azure-storage-blob-go"
+  packages = ["azblob"]
+  pruneopts = "NUT"
+  revision = "fc700035fe4a7020f50d49f420b3c088aed57e03"
+  version = "v0.8.0"
+
 [[projects]]
   branch = "master"
   digest = "1:81f8c061c3d18ed1710957910542bc17d2b789c6cd19e0f654c30b35fd255ca5"
@@ -445,7 +461,7 @@
   version = "v0.2.0"
 
 [[projects]]
-  digest = "1:5924704ec96f00247784c512cc57f45a595030376a7ff2ff993bf356793a2cb0"
+  digest = "1:c94f3440939c4a6e4ad6db6cd9315c7817ea6c04fe28cfe2aaae2099d2bc3cef"
   name = "github.com/google/go-containerregistry"
   packages = [
     "pkg/authn",
@@ -466,6 +482,7 @@
     "pkg/v1/tarball",
     "pkg/v1/types",
     "pkg/v1/v1util",
+    "pkg/v1/validate",
   ]
   pruneopts = "NUT"
   revision = "31e00cede111067bae48bfc2cbfc522b0b36207f"
@@ -604,6 +621,14 @@
   revision = "81db2a75821ed34e682567d48be488a1c3121088"
   version = "0.5"
 
+[[projects]]
+  branch = "master"
+  digest = "1:e0e7ecd8a4d1faddfb4b7e8a59684f803aa7f45489e25a64bb25d63691d4fc7a"
+  name = "github.com/mattn/go-ieproxy"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "f9202b1cfdeb0c82ddd3dc1e8e9cd94b3c0c1b13"
+
 [[projects]]
   digest = "1:d0164259ed17929689df11205194d80288e8ae25351778f7a3421a24774c36f8"
   name = "github.com/mattn/go-shellwords"
@@ -957,12 +982,13 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:f3e57ce909415ab28ce6c6db1fa7bf7a9134c8d9cb53663775d766b515a665d8"
+  digest = "1:d3ff7173b1da34e8fc91c30e8695c830bf5d7d7b0851fb8bdfb71fa862e9cf00"
   name = "golang.org/x/net"
   packages = [
     "context",
     "context/ctxhttp",
     "http/httpguts",
+    "http/httpproxy",
     "http2",
     "http2/hpack",
     "idna",
@@ -1372,6 +1398,7 @@
   analyzer-version = 1
   input-imports = [
     "cloud.google.com/go/storage",
+    "github.com/Azure/azure-storage-blob-go/azblob",
     "github.com/aws/aws-sdk-go/aws",
     "github.com/aws/aws-sdk-go/aws/session",
     "github.com/aws/aws-sdk-go/service/s3",
@@ -1395,6 +1422,7 @@
     "github.com/google/go-containerregistry/pkg/v1/random",
     "github.com/google/go-containerregistry/pkg/v1/remote",
     "github.com/google/go-containerregistry/pkg/v1/tarball",
+    "github.com/google/go-containerregistry/pkg/v1/validate",
     "github.com/google/go-github/github",
     "github.com/karrick/godirwalk",
     "github.com/minio/HighwayHash",
Gopkg.toml:

@@ -50,3 +50,7 @@ required = [
 [[constraint]]
   name = "github.com/minio/HighwayHash"
   version = "1.0.0"
+
+[[constraint]]
+  name = "github.com/Azure/azure-storage-blob-go"
+  version = "0.8.0"
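With the new constraint (and the matching Gopkg.lock entries above) in place, the vendor tree is normally refreshed with the dep tool; a sketch of the usual workflow, assuming a standard dep setup:

# Re-resolve dependencies and populate vendor/ from Gopkg.toml and Gopkg.lock.
dep ensure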
README.md:

@@ -98,6 +98,7 @@ You will need to store your build context in a place that kaniko can access.
 Right now, kaniko supports these storage solutions:
 - GCS Bucket
 - S3 Bucket
+- Azure Blob Storage
 - Local Directory
 - Git Repository
 
@@ -126,11 +127,15 @@ When running kaniko, use the `--context` flag with the appropriate prefix to spe
 | Local Directory   | dir://[path to a directory in the kaniko container]             | `dir:///workspace`                                            |
 | GCS Bucket        | gs://[bucket name]/[path to .tar.gz]                            | `gs://kaniko-bucket/path/to/context.tar.gz`                   |
 | S3 Bucket         | s3://[bucket name]/[path to .tar.gz]                            | `s3://kaniko-bucket/path/to/context.tar.gz`                   |
+| Azure Blob Storage| https://[account].[azureblobhostsuffix]/[container]/[path to .tar.gz] | `https://myaccount.blob.core.windows.net/container/path/to/context.tar.gz` |
 | Git Repository    | git://[repository url][#reference]                              | `git://github.com/acme/myproject.git#refs/heads/mybranch`     |
 
 If you don't specify a prefix, kaniko will assume a local directory.
 For example, to use a GCS bucket called `kaniko-bucket`, you would pass in `--context=gs://kaniko-bucket/path/to/context.tar.gz`.
 
+### Using Azure Blob Storage
+If you are using Azure Blob Storage for your context file, you will need to pass the [Azure Storage Account Access Key](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2fazure%2fstorage%2fblobs%2ftoc.json) as an environment variable named `AZURE_STORAGE_ACCESS_KEY` through Kubernetes Secrets.
+
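One way to create that secret so the example pod spec below can reference it (the secret and key names simply match the example; substitute your own account key):

kubectl create secret generic azure-storage-access-key \
  --from-literal=azure-storage-access-key=<your storage account access key>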
 ### Using Private Git Repository
 You can use `Personal Access Tokens` for Build Contexts from Private Repositories from [GitHub](https://blog.github.com/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/).
New file (example Kubernetes pod spec):

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
  name: kaniko
spec:
  containers:
  - name: kaniko
    image: gcr.io/kaniko-project/executor:latest
    args: ["--dockerfile=<path to Dockerfile within the build context>",
           "--context=https://myaccount.blob.core.windows.net/container/path/to/context.tar.gz",
           "--destination=<registry for image push>"]
    env:
      - name: AZURE_STORAGE_ACCESS_KEY
        valueFrom:
          secretKeyRef:
            name: azure-storage-access-key
            key: azure-storage-access-key
  volumes:
  - name: azure-storage-access-key
    secret:
      secretName: azure-storage-access-key
New file (package buildcontext — Azure Blob Storage build context):

@@ -0,0 +1,82 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package buildcontext

import (
	"context"
	"errors"
	"net/url"
	"os"
	"path/filepath"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/GoogleContainerTools/kaniko/pkg/constants"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

// AzureBlob holds the URL of an Azure Blob Storage build context.
type AzureBlob struct {
	context string
}

// UnpackTarFromBuildContext downloads the context file from the given Azure
// Blob Storage URL and unpacks it to the BuildContextDir.
func (b *AzureBlob) UnpackTarFromBuildContext() (string, error) {
	// Get AZURE_STORAGE_ACCESS_KEY from the environment.
	accountKey := os.Getenv("AZURE_STORAGE_ACCESS_KEY")
	if len(accountKey) == 0 {
		return "", errors.New("AZURE_STORAGE_ACCESS_KEY environment variable is not set")
	}

	// Derive the storage account name from the blob URL's host.
	u, err := url.Parse(b.context)
	if err != nil {
		return "", err
	}
	parts := azblob.NewBlobURLParts(*u)
	accountName := strings.Split(parts.Host, ".")[0]

	// Generate a shared-key credential from the account name and account key.
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return parts.Host, err
	}

	// Create the directory and file the context tarball is downloaded to.
	directory := constants.BuildContextDir
	tarPath := filepath.Join(directory, constants.ContextTar)
	if err := os.MkdirAll(directory, 0750); err != nil {
		return directory, err
	}
	file, err := os.Create(tarPath)
	if err != nil {
		return directory, err
	}
	defer file.Close()

	// Download the context file from Azure Blob Storage.
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
	blobURL := azblob.NewBlobURL(*u, p)
	ctx := context.Background()

	if err := azblob.DownloadBlobToFile(ctx, blobURL, 0, 0, file, azblob.DownloadFromBlobOptions{}); err != nil {
		return parts.Host, err
	}

	if err := util.UnpackCompressedTar(tarPath, directory); err != nil {
		return tarPath, err
	}
	// Remove the tar so it doesn't interfere with subsequent commands.
	return directory, os.Remove(tarPath)
}
Changed file (package buildcontext):

@@ -21,6 +21,7 @@ import (
 	"strings"
 
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
 )
 
 // BuildContext unifies calls to download and unpack the build context.
@@ -35,6 +36,7 @@ func GetBuildContext(srcContext string) (BuildContext, error) {
 	split := strings.SplitAfter(srcContext, "://")
 	prefix := split[0]
 	context := split[1]
+
 	switch prefix {
 	case constants.GCSBuildContextPrefix:
 		return &GCS{context: context}, nil
@@ -44,6 +46,11 @@ func GetBuildContext(srcContext string) (BuildContext, error) {
 		return &Dir{context: context}, nil
 	case constants.GitBuildContextPrefix:
 		return &Git{context: context}, nil
-	}
-	return nil, errors.New("unknown build context prefix provided, please use one of the following: gs://, dir://, s3://, git://")
+	case constants.HTTPSBuildContextPrefix:
+		if util.ValidAzureBlobStorageHost(srcContext) {
+			return &AzureBlob{context: srcContext}, nil
+		}
+		return nil, errors.New("url provided for https context is not in a supported format, please use the https url for Azure Blob Storage")
+	}
+	return nil, errors.New("unknown build context prefix provided, please use one of the following: gs://, dir://, s3://, git://, https://")
 }
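For orientation, a minimal sketch of how the new https dispatch might be exercised end to end; the blob URL is hypothetical and AZURE_STORAGE_ACCESS_KEY must be set, as described in the README changes above:

package main

import (
	"fmt"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
)

func main() {
	// An https:// context is accepted only if the host looks like Azure Blob Storage.
	bc, err := buildcontext.GetBuildContext("https://myaccount.blob.core.windows.net/container/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	// Downloads the tarball and unpacks it to constants.BuildContextDir.
	dir, err := bc.UnpackTarFromBuildContext()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("build context unpacked to", dir)
}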
Changed file (package constants):

@@ -58,6 +58,7 @@ const (
 	S3BuildContextPrefix       = "s3://"
 	LocalDirBuildContextPrefix = "dir://"
 	GitBuildContextPrefix      = "git://"
+	HTTPSBuildContextPrefix    = "https://"
 
 	HOME = "HOME"
 	// DefaultHOMEValue is the default value Docker sets for $HOME
@@ -78,3 +79,10 @@ const (
 
 // ScratchEnvVars are the default environment variables needed for a scratch image.
 var ScratchEnvVars = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"}
+
+// AzureBlobStorageHostRegEx holds the regular expressions for valid Azure Blob Storage
+// host suffixes for AzureCloud, AzureChinaCloud, AzureGermanCloud and AzureUSGovernment.
+var AzureBlobStorageHostRegEx = []string{"https://(.+?)\\.blob\\.core\\.windows\\.net/(.+)",
+	"https://(.+?)\\.blob\\.core\\.chinacloudapi\\.cn/(.+)",
+	"https://(.+?)\\.blob\\.core\\.cloudapi\\.de/(.+)",
+	"https://(.+?)\\.blob\\.core\\.usgovcloudapi\\.net/(.+)",
+}
New file (package util — Azure host validation):

@@ -0,0 +1,36 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"regexp"

	"github.com/GoogleContainerTools/kaniko/pkg/constants"
)

// ValidAzureBlobStorageHost returns true if the given context URL has a valid
// Azure Blob Storage host suffix for AzureCloud, AzureChinaCloud, AzureGermanCloud
// or AzureUSGovernment. The supported suffixes are defined as regular expressions
// in constants.AzureBlobStorageHostRegEx.
func ValidAzureBlobStorageHost(context string) bool {
	for _, re := range constants.AzureBlobStorageHostRegEx {
		validBlobURL := regexp.MustCompile(re)
		if validBlobURL.MatchString(context) {
			return true
		}
	}
	return false
}
New file (package util — tests):

@@ -0,0 +1,75 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"testing"

	"github.com/GoogleContainerTools/kaniko/testutil"
)

func Test_ValidAzureBlobStorageHost(t *testing.T) {
	tests := []struct {
		name           string
		context        string
		expectedResult bool
	}{
		{
			name:           "AzureCloud",
			context:        "https://myaccount.blob.core.windows.net/fairingcontext/context.tar.gz",
			expectedResult: true,
		},
		{
			name:           "AzureChinaCloud",
			context:        "https://myaccount.blob.core.chinacloudapi.cn/fairingcontext/context.tar.gz",
			expectedResult: true,
		},
		{
			name:           "AzureGermanCloud",
			context:        "https://myaccount.blob.core.cloudapi.de/fairingcontext/context.tar.gz",
			expectedResult: true,
		},
		{
			name:           "AzureUSGovernment",
			context:        "https://myaccount.blob.core.usgovcloudapi.net/fairingcontext/context.tar.gz",
			expectedResult: true,
		},
		{
			name:           "Invalid Azure Blob Storage Hostname",
			context:        "https://myaccount.anything.core.windows.net/fairingcontext/context.tar.gz",
			expectedResult: false,
		},
		{
			name:           "URL Missing Accountname",
			context:        "https://blob.core.windows.net/fairingcontext/context.tar.gz",
			expectedResult: false,
		},
		{
			name:           "URL Missing Containername",
			context:        "https://myaccount.blob.core.windows.net/",
			expectedResult: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			result := ValidAzureBlobStorageHost(test.context)
			testutil.CheckDeepEqual(t, test.expectedResult, result)
		})
	}
}
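Assuming this file lives under pkg/util (the package name suggests it), the table-driven test above can be run on its own:

go test ./pkg/util -run Test_ValidAzureBlobStorageHost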
New file (vendored MIT license):

@@ -0,0 +1,21 @@
    MIT License

    Copyright (c) Microsoft Corporation. All rights reserved.

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the "Software"), to deal
    in the Software without restriction, including without limitation the rights
    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in all
    copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    SOFTWARE.
New file (vendored azure-pipeline-go, package pipeline — core types):

@@ -0,0 +1,284 @@
package pipeline

import (
	"context"
	"net"
	"net/http"
	"os"
	"time"

	"github.com/mattn/go-ieproxy"
)

// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
// requires that this Factory create a new instance of its Policy object.
type Factory interface {
	New(next Policy, po *PolicyOptions) Policy
}

// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc

// New calls f(next, po).
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
	return f(next, po)
}

// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
// Response goes backward through the linked-list for additional processing.
// NOTE: Request is passed by value so changes do not change the caller's version of
// the request. However, Request has some fields that reference mutable objects (not strings).
// These references are copied; a deep copy is not performed. Specifically, this means that
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
type Policy interface {
	Do(ctx context.Context, request Request) (Response, error)
}

// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
type PolicyFunc func(ctx context.Context, request Request) (Response, error)

// Do calls f(ctx, request).
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
	return f(ctx, request)
}

// Options configures a Pipeline's behavior.
type Options struct {
	HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
	Log        LogOptions
}

// LogLevel tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LogLevel uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LogLevel = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

// LogOptions configures the pipeline's logging mechanism & level filtering.
type LogOptions struct {
	Log func(level LogLevel, message string)

	// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
	// An application can return different values over its lifetime; this allows the application to dynamically
	// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
	// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
	// Usually, the function will be implemented simply like this: return level <= LogWarning
	ShouldLog func(level LogLevel) bool
}

type pipeline struct {
	factories []Factory
	options   Options
}

// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
//
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
// Then, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
type Pipeline interface {
	Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
}

// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
func NewPipeline(factories []Factory, o Options) Pipeline {
	if o.HTTPSender == nil {
		o.HTTPSender = newDefaultHTTPClientFactory()
	}
	if o.Log.Log == nil {
		o.Log.Log = func(LogLevel, string) {} // No-op logger
	}
	return &pipeline{factories: factories, options: o}
}

// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
// ultimately sends the transformed HTTP request over the network.
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
	response, err := p.newPolicies(methodFactory).Do(ctx, request)
	request.close()
	return response, err
}

func (p *pipeline) newPolicies(methodFactory Factory) Policy {
	// The last Policy is the one that actually sends the request over the wire and gets the response.
	// It is overridable via the Options' HTTPSender field.
	po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
	next := p.options.HTTPSender.New(nil, po)

	// Walk over the slice of Factory objects in reverse (from wire to API)
	markers := 0
	for i := len(p.factories) - 1; i >= 0; i-- {
		factory := p.factories[i]
		if _, ok := factory.(methodFactoryMarker); ok {
			markers++
			if markers > 1 {
				panic("MethodFactoryMarker can only appear once in the pipeline")
			}
			if methodFactory != nil {
				// Replace MethodFactoryMarker with passed-in methodFactory
				next = methodFactory.New(next, po)
			}
		} else {
			// Use the slice's Factory to construct its Policy
			next = factory.New(next, po)
		}
	}

	// Each Factory has created its Policy
	if markers == 0 && methodFactory != nil {
		panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
	}
	return next // Return head of the Policy object linked-list
}

// A PolicyOptions represents optional information that can be used by a node in the
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
// uses the options to perform logging. But, in the future, this could be used for more.
type PolicyOptions struct {
	pipeline *pipeline
}

// ShouldLog returns true if the specified log level should be logged.
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
	if po.pipeline.options.Log.ShouldLog != nil {
		return po.pipeline.options.Log.ShouldLog(level)
	}
	return false
}

// Log logs a string to the Pipeline's Logger.
func (po *PolicyOptions) Log(level LogLevel, msg string) {
	if !po.ShouldLog(level) {
		return // Short circuit message formatting if we're not logging it
	}

	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	po.pipeline.options.Log.Log(level, msg)

	// If logger doesn't handle fatal/panic, we'll do it here.
	if level == LogFatal {
		os.Exit(1)
	} else if level == LogPanic {
		panic(msg)
	}
}

var pipelineHTTPClient = newDefaultHTTPClient()

func newDefaultHTTPClient() *http.Client {
	// We want the Transport to have a large connection pool
	return &http.Client{
		Transport: &http.Transport{
			Proxy: ieproxy.GetProxyFunc(),
			// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
			Dial /*Context*/ : (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).Dial, /*Context*/
			MaxIdleConns:           0, // No limit
			MaxIdleConnsPerHost:    100,
			IdleConnTimeout:        90 * time.Second,
			TLSHandshakeTimeout:    10 * time.Second,
			ExpectContinueTimeout:  1 * time.Second,
			DisableKeepAlives:      false,
			DisableCompression:     false,
			MaxResponseHeaderBytes: 0,
			//ResponseHeaderTimeout:  time.Duration{},
			//ExpectContinueTimeout:  time.Duration{},
		},
	}
}

// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to Go's default http.Client.
func newDefaultHTTPClientFactory() Factory {
	return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
		return func(ctx context.Context, request Request) (Response, error) {
			r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
			if err != nil {
				err = NewError(err, "HTTP request failed")
			}
			return NewHTTPResponse(r), err
		}
	})
}

var mfm = methodFactoryMarker{} // Singleton

// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
// MethodFactoryMarker object is replaced with the specified methodFactory object. If nil is passed for Do's
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
func MethodFactoryMarker() Factory {
	return mfm
}

type methodFactoryMarker struct {
}

func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
	panic("methodFactoryMarker policy should have been replaced with a method policy")
}

// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog.
// By default no implementation is provided here, because pipeline may be used in many different
// contexts, so the correct implementation is context-dependent.
type LogSanitizer interface {
	SanitizeLogMessage(raw string) string
}

var sanitizer LogSanitizer
var enableForceLog = true

// SetLogSanitizer can be called to supply a custom LogSanitizer.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the sanitizer on the fly).
func SetLogSanitizer(s LogSanitizer) {
	sanitizer = s
}

// SetForceLogEnabled can be used to disable ForceLog.
// There is no threadsafety or locking on the underlying variable,
// so call this function just once at startup of your application
// (Don't later try to change the setting on the fly).
func SetForceLogEnabled(enable bool) {
	enableForceLog = enable
}
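To make the Factory/Policy chain above concrete, here is a minimal sketch of a custom logging policy built from the FactoryFunc/PolicyFunc adapters; the log format and factory name are illustrative only:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// loggingFactory creates a Policy that logs each request before forwarding it.
func loggingFactory() pipeline.Factory {
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			log.Printf("sending %s %s", request.Method, request.URL)
			return next.Do(ctx, request) // forward to the next Policy in the linked-list
		}
	})
}

func main() {
	// A nil HTTPSender in Options makes the pipeline use its default http.Client.
	p := pipeline.NewPipeline([]pipeline.Factory{loggingFactory()}, pipeline.Options{})
	_ = p // call p.Do(ctx, nil, req) with a req from pipeline.NewRequest to send requests
}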
New file (vendored azure-pipeline-go, package pipeline — ForceLog):

@@ -0,0 +1,14 @@
package pipeline

// ForceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func ForceLog(level LogLevel, msg string) {
	if !enableForceLog {
		return
	}
	if sanitizer != nil {
		msg = sanitizer.SanitizeLogMessage(msg)
	}
	forceLog(level, msg)
}

New file (33 lines, vendored, executable): vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go

@@ -0,0 +1,33 @@
// +build !windows,!nacl,!plan9

package pipeline

import (
	"log"
	"log/syslog"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func forceLog(level LogLevel, msg string) {
	if defaultLogger == nil {
		return // Return fast if we failed to create the logger.
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	switch level {
	case LogFatal:
		defaultLogger.Fatal(msg)
	case LogPanic:
		defaultLogger.Panic(msg)
	case LogError, LogWarning, LogInfo:
		defaultLogger.Print(msg)
	}
}

var defaultLogger = func() *log.Logger {
	l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
	return l
}()

New file (61 lines, vendored, executable): vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go

@@ -0,0 +1,61 @@
package pipeline

import (
	"os"
	"syscall"
	"unsafe"
)

// forceLog should rarely be used. It forcibly logs an entry to the
// Windows Event Log (on Windows) or to the SysLog (on Linux).
func forceLog(level LogLevel, msg string) {
	var el eventType
	switch level {
	case LogError, LogFatal, LogPanic:
		el = elError
	case LogWarning:
		el = elWarning
	case LogInfo:
		el = elInfo
	}
	// We are logging it, ensure trailing newline
	if len(msg) == 0 || msg[len(msg)-1] != '\n' {
		msg += "\n" // Ensure trailing newline
	}
	reportEvent(el, 0, msg)
}

type eventType int16

const (
	elSuccess eventType = 0
	elError   eventType = 1
	elWarning eventType = 2
	elInfo    eventType = 4
)

var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
	advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
	registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")

	sourceName, _ := os.Executable()
	sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
	handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
	if lastErr == nil { // On error, logging is a no-op
		return func(eventType eventType, eventID int32, msg string) {}
	}
	reportEvent := advAPI32.MustFindProc("ReportEventW")
	return func(eventType eventType, eventID int32, msg string) {
		s, _ := syscall.UTF16PtrFromString(msg)
		_, _, _ = reportEvent.Call(
			uintptr(handle),             // HANDLE  hEventLog
			uintptr(eventType),          // WORD    wType
			uintptr(0),                  // WORD    wCategory
			uintptr(eventID),            // DWORD   dwEventID
			uintptr(0),                  // PSID    lpUserSid
			uintptr(1),                  // WORD    wNumStrings
			uintptr(0),                  // DWORD   dwDataSize
			uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
			uintptr(0))                  // LPVOID  lpRawData
	}
}()
|  | @ -0,0 +1,161 @@ | ||||||
|  | // Copyright 2017 Microsoft Corporation. All rights reserved.
 | ||||||
|  | // Use of this source code is governed by an MIT
 | ||||||
|  | // license that can be found in the LICENSE file.
 | ||||||
|  | 
 | ||||||
|  | /* | ||||||
|  | Package pipeline implements an HTTP request/response middleware pipeline whose | ||||||
|  | policy objects mutate an HTTP request's URL, query parameters, and/or headers before | ||||||
|  | the request is sent over the wire. | ||||||
|  | 
 | ||||||
|  | Not all policy objects mutate an HTTP request; some policy objects simply impact the | ||||||
|  | flow of requests/responses by performing operations such as logging, retry policies, | ||||||
|  | timeouts, failure injection, and deserialization of response payloads. | ||||||
|  | 
 | ||||||
|  | Implementing the Policy Interface | ||||||
|  | 
 | ||||||
|  | To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do | ||||||
|  | method is called when an HTTP request wants to be sent over the network. Your Do method can perform any | ||||||
|  | operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query | ||||||
|  | parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object | ||||||
|  | in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy | ||||||
|  | object sends the HTTP request over the network (by calling the HTTPSender's Do method). | ||||||
|  | 
 | ||||||
|  | When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response | ||||||
|  | (in reverse order). The Policy object can log the response, retry the operation if due to a transient failure | ||||||
|  | or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response | ||||||
|  | to the code that initiated the original HTTP request. | ||||||
|  | 
 | ||||||
|  | Here is a template for how to define a pipeline.Policy object: | ||||||
|  | 
 | ||||||
|  |    type myPolicy struct { | ||||||
|  |       node   PolicyNode | ||||||
|  |       // TODO: Add configuration/setting fields here (if desired)...
 | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  |    func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  |       // TODO: Mutate/process the HTTP request here...
 | ||||||
|  |       response, err := p.node.Do(ctx, request)	// Forward HTTP request to next Policy & get HTTP response
 | ||||||
|  |       // TODO: Mutate/process the HTTP response here...
 | ||||||
|  |       return response, err	// Return response/error to previous Policy
 | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  | Implementing the Factory Interface | ||||||
|  | 
 | ||||||
|  | Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New | ||||||
|  | method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is | ||||||
|  | passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and | ||||||
|  | a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object | ||||||
|  | passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object. | ||||||
|  | 
 | ||||||
|  | Here is a template for how to define a pipeline.Policy object: | ||||||
|  | 
 | ||||||
|  |    // NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
 | ||||||
|  |    // this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
 | ||||||
|  |    type myPolicyFactory struct { | ||||||
|  |       // TODO: Add any configuration/setting fields if desired...
 | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  |    func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy { | ||||||
|  |       return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
 | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  | Using your Factory and Policy objects via a Pipeline | ||||||
|  | 
 | ||||||
|  | To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes | ||||||
|  | this slice to the pipeline.NewPipeline function. | ||||||
|  | 
 | ||||||
|  |    func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline | ||||||
|  | 
 | ||||||
|  | This function also requires an object implementing the HTTPSender interface. For simple scenarios, | ||||||
|  | passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually | ||||||
|  | send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender | ||||||
|  | object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects | ||||||
|  | or other objects that can simulate the network requests for testing purposes. | ||||||
|  | 
 | ||||||
|  | Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple | ||||||
|  | wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a | ||||||
|  | context.Context for cancelling the HTTP request (if desired). | ||||||
|  | 
 | ||||||
|  |    type Pipeline interface { | ||||||
|  |       Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error) | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  | Do iterates over the slice of Factory objects and tells each one to create its corresponding | ||||||
|  | Policy object. After the linked-list of Policy objects have been created, Do calls the first | ||||||
|  | Policy object passing it the Context & HTTP request parameters. These parameters now flow through | ||||||
|  | all the Policy objects giving each object a chance to look at and/or mutate the HTTP request. | ||||||
|  | The last Policy object sends the message over the network. | ||||||
|  | 
 | ||||||
|  | When the network operation completes, the HTTP response and error return values pass | ||||||
|  | back through the same Policy objects in reverse order. Most Policy objects ignore the | ||||||
|  | response/error but some log the result, retry the operation (depending on the exact | ||||||
|  | reason the operation failed), or deserialize the response's body. Your own Policy | ||||||
|  | objects can do whatever they like when processing outgoing requests or incoming responses. | ||||||
|  | 
 | ||||||
|  | Note that after an I/O request runs to completion, the Policy objects for that request | ||||||
|  | are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing | ||||||
|  | them to be created once and reused over many I/O operations. This allows for efficient use of | ||||||
|  | memory and also makes them safely usable by multiple goroutines concurrently. | ||||||
|  | 
 | ||||||
|  | Inserting a Method-Specific Factory into the Linked-List of Policy Objects | ||||||
|  | 
 | ||||||
|  | While Pipeline and Factory objects can be reused over many different operations, it is | ||||||
|  | common to have special behavior for a specific operation/method. For example, a method | ||||||
|  | may need to deserialize the response's body to an instance of a specific data type. | ||||||
|  | To accommodate this, the Pipeline's Do method takes an additional method-specific | ||||||
|  | Factory object. The Do method tells this Factory to create a Policy object and | ||||||
|  | injects this method-specific Policy object into the linked-list of Policy objects. | ||||||
|  | 
 | ||||||
|  | When creating a Pipeline object, the slice of Factory objects passed must have 1 | ||||||
|  | (and only 1) entry marking where the method-specific Factory should be injected. | ||||||
|  | The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function: | ||||||
|  | 
 | ||||||
|  |    func MethodFactoryMarker() pipeline.Factory | ||||||
|  | 
 | ||||||
|  | Creating an HTTP Request Object | ||||||
|  | 
 | ||||||
|  | The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct. | ||||||
|  | Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard | ||||||
|  | http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function: | ||||||
|  | 
 | ||||||
|  |    func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error) | ||||||
|  | 
 | ||||||
|  | To this function, you must pass a pipeline.RequestOptions that looks like this: | ||||||
|  | 
 | ||||||
|  |    type RequestOptions struct { | ||||||
|  |       // The readable and seekable stream to be sent to the server as the request's body.
 | ||||||
|  |       Body io.ReadSeeker | ||||||
|  | 
 | ||||||
|  |       // The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
 | ||||||
|  |       Progress ProgressReceiver | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  | The method and struct ensure that the request's body stream is a read/seekable stream. | ||||||
|  | A seekable stream is required so that upon retry, the final Policy object can seek | ||||||
|  | the stream back to the beginning before retrying the network request and re-uploading the | ||||||
|  | body. In addition, you can associate a ProgressReceiver callback function which will be | ||||||
|  | invoked periodically to report progress while bytes are being read from the body stream | ||||||
|  | and sent over the network. | ||||||
|  | 
 | ||||||
|  | Processing the HTTP Response | ||||||
|  | 
 | ||||||
|  | When an HTTP response comes in from the network, a reference to Go's http.Response struct is | ||||||
|  | embedded in a struct that implements the pipeline.Response interface: | ||||||
|  | 
 | ||||||
|  |    type Response interface { | ||||||
|  |       Response() *http.Response | ||||||
|  |    } | ||||||
|  | 
 | ||||||
|  | This interface is returned through all the Policy objects. Each Policy object can call the Response | ||||||
|  | interface's Response method to examine (or mutate) the embedded http.Response object. | ||||||
|  | 
 | ||||||
|  | A Policy object can internally define another struct (implementing the pipeline.Response interface) | ||||||
|  | that embeds an http.Response and adds additional fields and return this structure to other Policy | ||||||
|  | objects. This allows a Policy object to deserialize the body to some other struct and return the | ||||||
|  | original http.Response and the additional struct back through the Policy chain. Other Policy objects | ||||||
|  | can see the Response but cannot see the additional struct with the deserialized body. After all the | ||||||
|  | Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method. | ||||||
|  | The caller of this method can perform a type assertion attempting to get back to the struct type | ||||||
|  | really returned by the Policy object. If the type assertion is successful, the caller now has | ||||||
|  | access to both the http.Response and the deserialized struct object. | ||||||
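|  |  | ||||||
|  | For example, a minimal sketch (p, req and downloadResponse are the hypothetical Pipeline, | ||||||
|  | Request and method-specific response struct): | ||||||
|  |  | ||||||
|  |    resp, err := p.Do(ctx, responderFactory, req) | ||||||
|  |    if err != nil { | ||||||
|  |       return err | ||||||
|  |    } | ||||||
|  |    if dr, ok := resp.(*downloadResponse); ok { | ||||||
|  |       _ = dr.Response() // the embedded *http.Response | ||||||
|  |       // dr also exposes the deserialized body fields | ||||||
|  |    } | ||||||
|  | */ | ||||||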
|  | package pipeline | ||||||
|  | @ -0,0 +1,181 @@ | ||||||
|  | package pipeline | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"runtime" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type causer interface { | ||||||
|  | 	Cause() error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func errorWithPC(msg string, pc uintptr) string { | ||||||
|  | 	s := "" | ||||||
|  | 	if fn := runtime.FuncForPC(pc); fn != nil { | ||||||
|  | 		file, line := fn.FileLine(pc) | ||||||
|  | 		s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line) | ||||||
|  | 	} | ||||||
|  | 	s += msg + "\n\n" | ||||||
|  | 	return s | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getPC(callersToSkip int) uintptr { | ||||||
|  | 	// Get the PC of Initialize method's caller.
 | ||||||
|  | 	pc := [1]uintptr{} | ||||||
|  | 	_ = runtime.Callers(callersToSkip, pc[:]) | ||||||
|  | 	return pc[0] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ErrorNode can be an embedded field in a private error object. This field
 | ||||||
|  | // adds Program Counter support and a 'cause' (reference to a preceding error).
 | ||||||
|  | // When initializing an error type with this embedded field, initialize the
 | ||||||
|  | // ErrorNode field by calling ErrorNode{}.Initialize(cause).
 | ||||||
|  | type ErrorNode struct { | ||||||
|  | 	pc    uintptr // Represents a Program Counter that you can get symbols for.
 | ||||||
|  | 	cause error   // Refers to the preceding error (or nil)
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error returns a string with the PC's symbols or "" if the PC is invalid.
 | ||||||
|  | // When defining a new error type, have its Error method call this one passing
 | ||||||
|  | // it the string representation of the error.
 | ||||||
|  | func (e *ErrorNode) Error(msg string) string { | ||||||
|  | 	s := errorWithPC(msg, e.pc) | ||||||
|  | 	if e.cause != nil { | ||||||
|  | 		s += e.cause.Error() + "\n" | ||||||
|  | 	} | ||||||
|  | 	return s | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Cause returns the error that preceded this error.
 | ||||||
|  | func (e *ErrorNode) Cause() error { return e.cause } | ||||||
|  | 
 | ||||||
|  | // Temporary returns true if the error occurred due to a temporary condition.
 | ||||||
|  | func (e ErrorNode) Temporary() bool { | ||||||
|  | 	type temporary interface { | ||||||
|  | 		Temporary() bool | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for err := e.cause; err != nil; { | ||||||
|  | 		if t, ok := err.(temporary); ok { | ||||||
|  | 			return t.Temporary() | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if cause, ok := err.(causer); ok { | ||||||
|  | 			err = cause.Cause() | ||||||
|  | 		} else { | ||||||
|  | 			err = nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Timeout returns true if the error occurred due to time expiring.
 | ||||||
|  | func (e ErrorNode) Timeout() bool { | ||||||
|  | 	type timeout interface { | ||||||
|  | 		Timeout() bool | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for err := e.cause; err != nil; { | ||||||
|  | 		if t, ok := err.(timeout); ok { | ||||||
|  | 			return t.Timeout() | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if cause, ok := err.(causer); ok { | ||||||
|  | 			err = cause.Cause() | ||||||
|  | 		} else { | ||||||
|  | 			err = nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Initialize is used to initialize an embedded ErrorNode field.
 | ||||||
|  | // It captures the caller's program counter and saves the cause (preceding error).
 | ||||||
|  | // To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
 | ||||||
|  | // value of 3 is very common; but, depending on your code nesting, you may need
 | ||||||
|  | // a different value.
 | ||||||
|  | func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode { | ||||||
|  | 	pc := getPC(callersToSkip) | ||||||
|  | 	return ErrorNode{pc: pc, cause: cause} | ||||||
|  | } | ||||||
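|  |  | ||||||
|  | // A minimal usage sketch; myError is hypothetical, and a callersToSkip of 3 assumes | ||||||
|  | // this constructor is invoked directly by the failing function: | ||||||
|  | // | ||||||
|  | //	type myError struct { | ||||||
|  | //		ErrorNode | ||||||
|  | //		msg string | ||||||
|  | //	} | ||||||
|  | // | ||||||
|  | //	func newMyError(cause error, msg string) error { | ||||||
|  | //		return &myError{ErrorNode: ErrorNode{}.Initialize(cause, 3), msg: msg} | ||||||
|  | //	} | ||||||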
|  | 
 | ||||||
|  | // Cause walks all the preceding errors and returns the originating error.
 | ||||||
|  | func Cause(err error) error { | ||||||
|  | 	for err != nil { | ||||||
|  | 		cause, ok := err.(causer) | ||||||
|  | 		if !ok { | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 		err = cause.Cause() | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ErrorNodeNoCause can be an embedded field in a private error object. This field
 | ||||||
|  | // adds Program Counter support.
 | ||||||
|  | // When initializing an error type with this embedded field, initialize the
 | ||||||
|  | // ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
 | ||||||
|  | type ErrorNodeNoCause struct { | ||||||
|  | 	pc uintptr // Represents a Program Counter that you can get symbols for.
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error returns a string with the PC's symbols or "" if the PC is invalid.
 | ||||||
|  | // When defining a new error type, have its Error method call this one passing
 | ||||||
|  | // it the string representation of the error.
 | ||||||
|  | func (e *ErrorNodeNoCause) Error(msg string) string { | ||||||
|  | 	return errorWithPC(msg, e.pc) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Temporary returns true if the error occurred due to a temporary condition.
 | ||||||
|  | func (e ErrorNodeNoCause) Temporary() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Timeout returns true if the error occurred due to time expiring.
 | ||||||
|  | func (e ErrorNodeNoCause) Timeout() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Initialize is used to initialize an embedded ErrorNodeNoCause field.
 | ||||||
|  | // It captures the caller's program counter.
 | ||||||
|  | // To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
 | ||||||
|  | // value of 3 is very common; but, depending on your code nesting, you may need
 | ||||||
|  | // a different value.
 | ||||||
|  | func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause { | ||||||
|  | 	pc := getPC(callersToSkip) | ||||||
|  | 	return ErrorNodeNoCause{pc: pc} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewError creates a simple string error (like errors.New), but this
 | ||||||
|  | // error also captures the caller's Program Counter and the preceding error (if provided).
 | ||||||
|  | func NewError(cause error, msg string) error { | ||||||
|  | 	if cause != nil { | ||||||
|  | 		return &pcError{ | ||||||
|  | 			ErrorNode: ErrorNode{}.Initialize(cause, 3), | ||||||
|  | 			msg:       msg, | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return &pcErrorNoCause{ | ||||||
|  | 		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3), | ||||||
|  | 		msg:              msg, | ||||||
|  | 	} | ||||||
|  | } | ||||||
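|  |  | ||||||
|  | // A minimal usage sketch (the wrapped error and message are illustrative): | ||||||
|  | //	err := NewError(io.ErrUnexpectedEOF, "reading block failed") | ||||||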
|  | 
 | ||||||
|  | // pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
 | ||||||
|  | type pcError struct { | ||||||
|  | 	ErrorNode | ||||||
|  | 	msg string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error satisfies the error interface. It shows the error with Program Counter
 | ||||||
|  | // symbols and calls Error on the preceding error so you can see the full error chain.
 | ||||||
|  | func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) } | ||||||
|  | 
 | ||||||
|  | // pcErrorNoCause is a simple string error (like errors.New) with an ErrorNodeNoCause (PC).
 | ||||||
|  | type pcErrorNoCause struct { | ||||||
|  | 	ErrorNodeNoCause | ||||||
|  | 	msg string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error satisfies the error interface. It shows the error with Program Counter symbols.
 | ||||||
|  | func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) } | ||||||
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go (generated, vendored, executable file, 82 lines)
|  | @ -0,0 +1,82 @@ | ||||||
|  | package pipeline | ||||||
|  | 
 | ||||||
|  | import "io" | ||||||
|  | 
 | ||||||
|  | // ********** The following is common between the request body AND the response body.
 | ||||||
|  | 
 | ||||||
|  | // ProgressReceiver defines the signature of a callback function invoked as progress is reported.
 | ||||||
|  | type ProgressReceiver func(bytesTransferred int64) | ||||||
|  | 
 | ||||||
|  | // ********** The following are specific to the request body (a ReadSeekCloser)
 | ||||||
|  | 
 | ||||||
|  | // This struct is used when sending a body to the network
 | ||||||
|  | type requestBodyProgress struct { | ||||||
|  | 	requestBody io.ReadSeeker // Seeking is required to support retries
 | ||||||
|  | 	pr          ProgressReceiver | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
 | ||||||
|  | func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker { | ||||||
|  | 	if pr == nil { | ||||||
|  | 		panic("pr must not be nil") | ||||||
|  | 	} | ||||||
|  | 	return &requestBodyProgress{requestBody: requestBody, pr: pr} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read reads a block of data from an inner stream and reports progress
 | ||||||
|  | func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) { | ||||||
|  | 	n, err = rbp.requestBody.Read(p) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	// Invokes the user's callback method to report progress
 | ||||||
|  | 	position, err := rbp.requestBody.Seek(0, io.SeekCurrent) | ||||||
|  | 	if err != nil { | ||||||
|  | 		panic(err) | ||||||
|  | 	} | ||||||
|  | 	rbp.pr(position) | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) { | ||||||
|  | 	return rbp.requestBody.Seek(offset, whence) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
 | ||||||
|  | func (rbp *requestBodyProgress) Close() error { | ||||||
|  | 	if c, ok := rbp.requestBody.(io.Closer); ok { | ||||||
|  | 		return c.Close() | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ********** The following are specific to the response body (a ReadCloser)
 | ||||||
|  | 
 | ||||||
|  | // This struct is used when receiving a body from the network
 | ||||||
|  | type responseBodyProgress struct { | ||||||
|  | 	responseBody io.ReadCloser | ||||||
|  | 	pr           ProgressReceiver | ||||||
|  | 	offset       int64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
 | ||||||
|  | func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser { | ||||||
|  | 	if pr == nil { | ||||||
|  | 		panic("pr must not be nil") | ||||||
|  | 	} | ||||||
|  | 	return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0} | ||||||
|  | } | ||||||
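|  |  | ||||||
|  | // A minimal usage sketch (resp is a hypothetical *http.Response): | ||||||
|  | //	body := NewResponseBodyProgress(resp.Body, | ||||||
|  | //		func(bytesTransferred int64) { fmt.Printf("read %d bytes\n", bytesTransferred) }) | ||||||
|  | //	defer body.Close() | ||||||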
|  | 
 | ||||||
|  | // Read reads a block of data from an inner stream and reports progress
 | ||||||
|  | func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) { | ||||||
|  | 	n, err = rbp.responseBody.Read(p) | ||||||
|  | 	rbp.offset += int64(n) | ||||||
|  | 
 | ||||||
|  | 	// Invokes the user's callback method to report progress
 | ||||||
|  | 	rbp.pr(rbp.offset) | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (rbp *responseBodyProgress) Close() error { | ||||||
|  | 	return rbp.responseBody.Close() | ||||||
|  | } | ||||||
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go (generated, vendored, executable file, 147 lines)
|  | @ -0,0 +1,147 @@ | ||||||
|  | package pipeline | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
 | ||||||
|  | type Request struct { | ||||||
|  | 	*http.Request | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewRequest initializes a new HTTP request object with the given method, URL, and optional body.
 | ||||||
|  | func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) { | ||||||
|  | 	// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
 | ||||||
|  | 
 | ||||||
|  | 	// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
 | ||||||
|  | 	request.Request = &http.Request{ | ||||||
|  | 		Method:     method, | ||||||
|  | 		URL:        &url, | ||||||
|  | 		Proto:      "HTTP/1.1", | ||||||
|  | 		ProtoMajor: 1, | ||||||
|  | 		ProtoMinor: 1, | ||||||
|  | 		Header:     make(http.Header), | ||||||
|  | 		Host:       url.Host, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if body != nil { | ||||||
|  | 		err = request.SetBody(body) | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetBody sets the body and content length, assumes body is not nil.
 | ||||||
|  | func (r Request) SetBody(body io.ReadSeeker) error { | ||||||
|  | 	size, err := body.Seek(0, io.SeekEnd) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	body.Seek(0, io.SeekStart) | ||||||
|  | 	r.ContentLength = size | ||||||
|  | 	r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)} | ||||||
|  | 
 | ||||||
|  | 	if size != 0 { | ||||||
|  | 		r.Body = &retryableRequestBody{body: body} | ||||||
|  | 		r.GetBody = func() (io.ReadCloser, error) { | ||||||
|  | 			_, err := body.Seek(0, io.SeekStart) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return nil, err | ||||||
|  | 			} | ||||||
|  | 			return r.Body, nil | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
 | ||||||
|  | 		r.Body = http.NoBody | ||||||
|  | 		r.GetBody = func() (io.ReadCloser, error) { | ||||||
|  | 			return http.NoBody, nil | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// close the user-provided empty body
 | ||||||
|  | 		if c, ok := body.(io.Closer); ok { | ||||||
|  | 			c.Close() | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Copy makes a copy of an http.Request. Specifically, it makes a deep copy
 | ||||||
|  | // of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
 | ||||||
|  | // RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
 | ||||||
|  | // Cancel, Response, and ctx fields. Copy panics if any of these fields are
 | ||||||
|  | // not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
 | ||||||
|  | func (r Request) Copy() Request { | ||||||
|  | 	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil { | ||||||
|  | 		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" + | ||||||
|  | 			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.") | ||||||
|  | 	} | ||||||
|  | 	copy := *r.Request          // Copy the request
 | ||||||
|  | 	urlCopy := *(r.Request.URL) // Copy the URL
 | ||||||
|  | 	copy.URL = &urlCopy | ||||||
|  | 	copy.Header = http.Header{} // Copy the header
 | ||||||
|  | 	for k, vs := range r.Header { | ||||||
|  | 		for _, value := range vs { | ||||||
|  | 			copy.Header.Add(k, value) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return Request{Request: ©} // Return the copy
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r Request) close() error { | ||||||
|  | 	if r.Body != nil && r.Body != http.NoBody { | ||||||
|  | 		c, ok := r.Body.(*retryableRequestBody) | ||||||
|  | 		if !ok { | ||||||
|  | 			panic("unexpected request body type (should be *retryableRequestBody)") | ||||||
|  | 		} | ||||||
|  | 		return c.realClose() | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
 | ||||||
|  | func (r Request) RewindBody() error { | ||||||
|  | 	if r.Body != nil && r.Body != http.NoBody { | ||||||
|  | 		s, ok := r.Body.(io.Seeker) | ||||||
|  | 		if !ok { | ||||||
|  | 			panic("unexpected request body type (should be io.Seeker)") | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Reset the stream back to the beginning
 | ||||||
|  | 		_, err := s.Seek(0, io.SeekStart) | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
 | ||||||
|  | 
 | ||||||
|  | // This struct is used when sending a body to the network
 | ||||||
|  | type retryableRequestBody struct { | ||||||
|  | 	body io.ReadSeeker // Seeking is required to support retries
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read reads a block of data from the inner stream
 | ||||||
|  | func (b *retryableRequestBody) Read(p []byte) (n int, err error) { | ||||||
|  | 	return b.body.Read(p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { | ||||||
|  | 	return b.body.Seek(offset, whence) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *retryableRequestBody) Close() error { | ||||||
|  | 	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
 | ||||||
|  | 	// The pipeline closes the request body upon success.
 | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *retryableRequestBody) realClose() error { | ||||||
|  | 	if c, ok := b.body.(io.Closer); ok { | ||||||
|  | 		return c.Close() | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go (generated, vendored, executable file, 74 lines)
|  | @ -0,0 +1,74 @@ | ||||||
|  | package pipeline | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"net/http" | ||||||
|  | 	"sort" | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
 | ||||||
|  | // This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
 | ||||||
|  | // might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
 | ||||||
|  | // The method that injected the method-specific Factory gets this returned Response and performs a type assertion
 | ||||||
|  | // to the expected struct and returns the struct to its caller.
 | ||||||
|  | type Response interface { | ||||||
|  | 	Response() *http.Response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This is the default struct that has the http.Response.
 | ||||||
|  | // A method can replace this struct with its own struct containing an http.Response
 | ||||||
|  | // field and any other additional fields.
 | ||||||
|  | type httpResponse struct { | ||||||
|  | 	response *http.Response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewHTTPResponse is typically called by a Policy object to return a Response object.
 | ||||||
|  | func NewHTTPResponse(response *http.Response) Response { | ||||||
|  | 	return &httpResponse{response: response} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // This method satisfies the public Response interface's Response method
 | ||||||
|  | func (r httpResponse) Response() *http.Response { | ||||||
|  | 	return r.response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
 | ||||||
|  | // not nil, then these are also written into the Buffer.
 | ||||||
|  | func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) { | ||||||
|  | 	// Write the request into the buffer.
 | ||||||
|  | 	fmt.Fprint(b, "   "+request.Method+" "+request.URL.String()+"\n") | ||||||
|  | 	writeHeader(b, request.Header) | ||||||
|  | 	if response != nil { | ||||||
|  | 		fmt.Fprintln(b, "   --------------------------------------------------------------------------------") | ||||||
|  | 		fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n") | ||||||
|  | 		writeHeader(b, response.Header) | ||||||
|  | 	} | ||||||
|  | 	if err != nil { | ||||||
|  | 		fmt.Fprintln(b, "   --------------------------------------------------------------------------------") | ||||||
|  | 		fmt.Fprint(b, "   ERROR:\n"+err.Error()+"\n") | ||||||
|  | 	} | ||||||
|  | } | ||||||
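|  |  | ||||||
|  | // A minimal usage sketch, e.g. from a logging Policy (req, resp and err are hypothetical): | ||||||
|  | //	b := &bytes.Buffer{} | ||||||
|  | //	WriteRequestWithResponse(b, req, resp, err) | ||||||
|  | //	fmt.Print(b.String()) | ||||||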
|  | 
 | ||||||
|  | // writeHeader appends an HTTP request's or response's headers into a Buffer.
 | ||||||
|  | func writeHeader(b *bytes.Buffer, header map[string][]string) { | ||||||
|  | 	if len(header) == 0 { | ||||||
|  | 		b.WriteString("   (no headers)\n") | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	keys := make([]string, 0, len(header)) | ||||||
|  | 	// Alphabetize the headers
 | ||||||
|  | 	for k := range header { | ||||||
|  | 		keys = append(keys, k) | ||||||
|  | 	} | ||||||
|  | 	sort.Strings(keys) | ||||||
|  | 	for _, k := range keys { | ||||||
|  | 		// Redact the value of any Authorization header to prevent security information from persisting in logs
 | ||||||
|  | 		value := interface{}("REDACTED") | ||||||
|  | 		if !strings.EqualFold(k, "Authorization") { | ||||||
|  | 			value = header[k] | ||||||
|  | 		} | ||||||
|  | 		fmt.Fprintf(b, "   %s: %+v\n", k, value) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | @ -0,0 +1,9 @@ | ||||||
|  | package pipeline | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// UserAgent is the string to be used in the user agent string when making requests.
 | ||||||
|  | 	UserAgent = "azure-pipeline-go/" + Version | ||||||
|  | 
 | ||||||
|  | 	// Version is the semantic version (see http://semver.org) of the pipeline package.
 | ||||||
|  | 	Version = "0.2.1" | ||||||
|  | ) | ||||||
|  | @ -0,0 +1,21 @@ | ||||||
|  |     MIT License | ||||||
|  | 
 | ||||||
|  |     Copyright (c) Microsoft Corporation. All rights reserved. | ||||||
|  | 
 | ||||||
|  |     Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  |     of this software and associated documentation files (the "Software"), to deal | ||||||
|  |     in the Software without restriction, including without limitation the rights | ||||||
|  |     to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  |     copies of the Software, and to permit persons to whom the Software is | ||||||
|  |     furnished to do so, subject to the following conditions: | ||||||
|  | 
 | ||||||
|  |     The above copyright notice and this permission notice shall be included in all | ||||||
|  |     copies or substantial portions of the Software. | ||||||
|  | 
 | ||||||
|  |     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  |     AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  |     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  |     OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||||
|  |     SOFTWARE. | ||||||
vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go (generated, vendored, normal file, 65 lines)
|  | @ -0,0 +1,65 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
 | ||||||
|  | type ModifiedAccessConditions struct { | ||||||
|  | 	IfModifiedSince   time.Time | ||||||
|  | 	IfUnmodifiedSince time.Time | ||||||
|  | 	IfMatch           ETag | ||||||
|  | 	IfNoneMatch       ETag | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // pointers is for internal infrastructure. It returns the fields as pointers.
 | ||||||
|  | func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) { | ||||||
|  | 	if !ac.IfModifiedSince.IsZero() { | ||||||
|  | 		ims = &ac.IfModifiedSince | ||||||
|  | 	} | ||||||
|  | 	if !ac.IfUnmodifiedSince.IsZero() { | ||||||
|  | 		ius = &ac.IfUnmodifiedSince | ||||||
|  | 	} | ||||||
|  | 	if ac.IfMatch != ETagNone { | ||||||
|  | 		ime = &ac.IfMatch | ||||||
|  | 	} | ||||||
|  | 	if ac.IfNoneMatch != ETagNone { | ||||||
|  | 		inme = &ac.IfNoneMatch | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContainerAccessConditions identifies container-specific access conditions which you optionally set.
 | ||||||
|  | type ContainerAccessConditions struct { | ||||||
|  | 	ModifiedAccessConditions | ||||||
|  | 	LeaseAccessConditions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobAccessConditions identifies blob-specific access conditions which you optionally set.
 | ||||||
|  | type BlobAccessConditions struct { | ||||||
|  | 	ModifiedAccessConditions | ||||||
|  | 	LeaseAccessConditions | ||||||
|  | } | ||||||
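|  |  | ||||||
|  | // A minimal usage sketch: only proceed if the blob still matches a cached ETag | ||||||
|  | // (etag is hypothetical): | ||||||
|  | //	ac := BlobAccessConditions{ | ||||||
|  | //		ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: etag}, | ||||||
|  | //	} | ||||||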
|  | 
 | ||||||
|  | // LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
 | ||||||
|  | type LeaseAccessConditions struct { | ||||||
|  | 	LeaseID string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // pointers is for internal infrastructure. It returns the fields as pointers.
 | ||||||
|  | func (ac LeaseAccessConditions) pointers() (leaseID *string) { | ||||||
|  | 	if ac.LeaseID != "" { | ||||||
|  | 		leaseID = &ac.LeaseID | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | /* | ||||||
|  | // getInt32 is for internal infrastructure. It is used with access condition values where
 | ||||||
|  | // 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
 | ||||||
|  | // and the privately-stored field in the access condition object is stored as +1 higher than desired.
 | ||||||
|  | // This method returns true if the value is > 0 (explicitly set), along with the stored value - 1 (the desired value).
 | ||||||
|  | func getInt32(value int32) (bool, int32) { | ||||||
|  | 	return value > 0, value - 1 | ||||||
|  | } | ||||||
|  | */ | ||||||
vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go (generated, vendored, normal file, 69 lines)
|  | @ -0,0 +1,69 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import "sync/atomic" | ||||||
|  | 
 | ||||||
|  | // atomicMorpherInt32 identifies a callback passed to and invoked by the atomicMorphInt32 function.
 | ||||||
|  | // The AtomicMorpher callback is passed a startValue and based on this value it returns
 | ||||||
|  | // what the new value should be and the result that AtomicMorph should return to its caller.
 | ||||||
|  | type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{}) | ||||||
|  | 
 | ||||||
|  | const targetAndMorpherMustNotBeNil = "target and morpher must not be nil" | ||||||
|  | 
 | ||||||
|  | // atomicMorphInt32 atomically morphs target into a new value (and result) as indicated by the morpher callback function.
 | ||||||
|  | func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} { | ||||||
|  | 	for { | ||||||
|  | 		currentVal := atomic.LoadInt32(target) | ||||||
|  | 		desiredVal, morphResult := morpher(currentVal) | ||||||
|  | 		if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) { | ||||||
|  | 			return morphResult | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
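|  |  | ||||||
|  | // A minimal usage sketch: atomically increment a counter and return its previous | ||||||
|  | // value (counter is hypothetical): | ||||||
|  | //	prev := atomicMorphInt32(&counter, func(start int32) (int32, interface{}) { | ||||||
|  | //		return start + 1, start | ||||||
|  | //	}).(int32) | ||||||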
|  | 
 | ||||||
|  | // atomicMorpherUint32 identifies a callback passed to and invoked by the atomicMorphUint32 function.
 | ||||||
|  | // The AtomicMorpher callback is passed a startValue and based on this value it returns
 | ||||||
|  | // what the new value should be and the result that AtomicMorph should return to its caller.
 | ||||||
|  | type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{}) | ||||||
|  | 
 | ||||||
|  | // atomicMorphUint32 atomically morphs target into a new value (and result) as indicated by the morpher callback function.
 | ||||||
|  | func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} { | ||||||
|  | 	for { | ||||||
|  | 		currentVal := atomic.LoadUint32(target) | ||||||
|  | 		desiredVal, morphResult := morpher(currentVal) | ||||||
|  | 		if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { | ||||||
|  | 			return morphResult | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // atomicMorpherInt64 identifies a callback passed to and invoked by the atomicMorphInt64 function.
 | ||||||
|  | // The AtomicMorpher callback is passed a startValue and based on this value it returns
 | ||||||
|  | // what the new value should be and the result that AtomicMorph should return to its caller.
 | ||||||
|  | type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}) | ||||||
|  | 
 | ||||||
|  | // atomicMorphInt64 atomically morphs target into a new value (and result) as indicated by the morpher callback function.
 | ||||||
|  | func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} { | ||||||
|  | 	for { | ||||||
|  | 		currentVal := atomic.LoadInt64(target) | ||||||
|  | 		desiredVal, morphResult := morpher(currentVal) | ||||||
|  | 		if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) { | ||||||
|  | 			return morphResult | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // atomicMorpherUint64 identifies a callback passed to and invoked by the atomicMorphUint64 function.
 | ||||||
|  | // The AtomicMorpher callback is passed a startValue and based on this value it returns
 | ||||||
|  | // what the new value should be and the result that AtomicMorph should return to its caller.
 | ||||||
|  | type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{}) | ||||||
|  | 
 | ||||||
|  | // atomicMorphUint64 atomically morphs target into a new value (and result) as indicated by the morpher callback function.
 | ||||||
|  | func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} { | ||||||
|  | 	for { | ||||||
|  | 		currentVal := atomic.LoadUint64(target) | ||||||
|  | 		desiredVal, morphResult := morpher(currentVal) | ||||||
|  | 		if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) { | ||||||
|  | 			return morphResult | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go (generated, vendored, normal file, 543 lines)
|  | @ -0,0 +1,543 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"io" | ||||||
|  | 	"net/http" | ||||||
|  | 
 | ||||||
|  | 	"bytes" | ||||||
|  | 	"os" | ||||||
|  | 	"sync" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"errors" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // CommonResponse returns the headers common to all blob REST API responses.
 | ||||||
|  | type CommonResponse interface { | ||||||
|  | 	// ETag returns the value for header ETag.
 | ||||||
|  | 	ETag() ETag | ||||||
|  | 
 | ||||||
|  | 	// LastModified returns the value for header Last-Modified.
 | ||||||
|  | 	LastModified() time.Time | ||||||
|  | 
 | ||||||
|  | 	// RequestID returns the value for header x-ms-request-id.
 | ||||||
|  | 	RequestID() string | ||||||
|  | 
 | ||||||
|  | 	// Date returns the value for header Date.
 | ||||||
|  | 	Date() time.Time | ||||||
|  | 
 | ||||||
|  | 	// Version returns the value for header x-ms-version.
 | ||||||
|  | 	Version() string | ||||||
|  | 
 | ||||||
|  | 	// Response returns the raw HTTP response object.
 | ||||||
|  | 	Response() *http.Response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
 | ||||||
|  | type UploadToBlockBlobOptions struct { | ||||||
|  | 	// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
 | ||||||
|  | 	BlockSize int64 | ||||||
|  | 
 | ||||||
|  | 	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
 | ||||||
|  | 	// Note that the progress reporting is not always increasing; it can go down when retrying a request.
 | ||||||
|  | 	Progress pipeline.ProgressReceiver | ||||||
|  | 
 | ||||||
|  | 	// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
 | ||||||
|  | 	BlobHTTPHeaders BlobHTTPHeaders | ||||||
|  | 
 | ||||||
|  | 	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
 | ||||||
|  | 	Metadata Metadata | ||||||
|  | 
 | ||||||
|  | 	// AccessConditions indicates the access conditions for the block blob.
 | ||||||
|  | 	AccessConditions BlobAccessConditions | ||||||
|  | 
 | ||||||
|  | 	// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
 | ||||||
|  | 	Parallelism uint16 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
 | ||||||
|  | func UploadBufferToBlockBlob(ctx context.Context, b []byte, | ||||||
|  | 	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { | ||||||
|  | 	bufferSize := int64(len(b)) | ||||||
|  | 	if o.BlockSize == 0 { | ||||||
|  | 		// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
 | ||||||
|  | 		if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { | ||||||
|  | 			return nil, errors.New("buffer is too large to upload to a block blob") | ||||||
|  | 		} | ||||||
|  | 		// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
 | ||||||
|  | 		if bufferSize <= BlockBlobMaxUploadBlobBytes { | ||||||
|  | 			o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
 | ||||||
|  | 		} else { | ||||||
|  | 			o.BlockSize = bufferSize / BlockBlobMaxBlocks   // buffer / max blocks = block size to use all 50,000 blocks
 | ||||||
|  | 			if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
 | ||||||
|  | 				o.BlockSize = BlobDefaultDownloadBlockSize | ||||||
|  | 			} | ||||||
|  | 			// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
 | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if bufferSize <= BlockBlobMaxUploadBlobBytes { | ||||||
|  | 		// If the size can fit in 1 Upload call, do it this way
 | ||||||
|  | 		var body io.ReadSeeker = bytes.NewReader(b) | ||||||
|  | 		if o.Progress != nil { | ||||||
|  | 			body = pipeline.NewRequestBodyProgress(body, o.Progress) | ||||||
|  | 		} | ||||||
|  | 		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1) | ||||||
|  | 
 | ||||||
|  | 	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
 | ||||||
|  | 	progress := int64(0) | ||||||
|  | 	progressLock := &sync.Mutex{} | ||||||
|  | 
 | ||||||
|  | 	err := DoBatchTransfer(ctx, BatchTransferOptions{ | ||||||
|  | 		OperationName: "UploadBufferToBlockBlob", | ||||||
|  | 		TransferSize:  bufferSize, | ||||||
|  | 		ChunkSize:     o.BlockSize, | ||||||
|  | 		Parallelism:   o.Parallelism, | ||||||
|  | 		Operation: func(offset int64, count int64, ctx context.Context) error { | ||||||
|  | 			// This function is called once per block.
 | ||||||
|  | 			// It is passed this block's offset within the buffer and its count of bytes
 | ||||||
|  | 			// Prepare to read the proper block/section of the buffer
 | ||||||
|  | 			var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count]) | ||||||
|  | 			blockNum := offset / o.BlockSize | ||||||
|  | 			if o.Progress != nil { | ||||||
|  | 				blockProgress := int64(0) | ||||||
|  | 				body = pipeline.NewRequestBodyProgress(body, | ||||||
|  | 					func(bytesTransferred int64) { | ||||||
|  | 						diff := bytesTransferred - blockProgress | ||||||
|  | 						blockProgress = bytesTransferred | ||||||
|  | 						progressLock.Lock() // 1 goroutine at a time gets a progress report
 | ||||||
|  | 						progress += diff | ||||||
|  | 						o.Progress(progress) | ||||||
|  | 						progressLock.Unlock() | ||||||
|  | 					}) | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
 | ||||||
|  | 			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
 | ||||||
|  | 			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) | ||||||
|  | 			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil) | ||||||
|  | 			return err | ||||||
|  | 		}, | ||||||
|  | 	}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	// All put blocks were successful, call Put Block List to finalize the blob
 | ||||||
|  | 	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadFileToBlockBlob uploads a file in blocks to a block blob.
 | ||||||
|  | func UploadFileToBlockBlob(ctx context.Context, file *os.File, | ||||||
|  | 	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { | ||||||
|  | 
 | ||||||
|  | 	stat, err := file.Stat() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	m := mmf{} // Default to an empty slice; used for 0-size file
 | ||||||
|  | 	if stat.Size() != 0 { | ||||||
|  | 		m, err = newMMF(file, false, 0, int(stat.Size())) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 		defer m.unmap() | ||||||
|  | 	} | ||||||
|  | 	return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o) | ||||||
|  | } | ||||||
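|  |  | ||||||
|  | // A minimal usage sketch (the file name and blockBlobURL are hypothetical): | ||||||
|  | //	f, _ := os.Open("data.bin") | ||||||
|  | //	defer f.Close() | ||||||
|  | //	_, err := UploadFileToBlockBlob(ctx, f, blockBlobURL, | ||||||
|  | //		UploadToBlockBlobOptions{BlockSize: 4 * 1024 * 1024, Parallelism: 8}) | ||||||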
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
 | ||||||
|  | 
 | ||||||
|  | // DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
 | ||||||
|  | type DownloadFromBlobOptions struct { | ||||||
|  | 	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
 | ||||||
|  | 	BlockSize int64 | ||||||
|  | 
 | ||||||
|  | 	// Progress is a function that is invoked periodically as bytes are received.
 | ||||||
|  | 	Progress pipeline.ProgressReceiver | ||||||
|  | 
 | ||||||
|  | 	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
 | ||||||
|  | 	AccessConditions BlobAccessConditions | ||||||
|  | 
 | ||||||
|  | 	// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
 | ||||||
|  | 	Parallelism uint16 | ||||||
|  | 
 | ||||||
|  | 	// RetryReaderOptionsPerBlock is used when downloading each block.
 | ||||||
|  | 	RetryReaderOptionsPerBlock RetryReaderOptions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // downloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
 | ||||||
|  | func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, | ||||||
|  | 	b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error { | ||||||
|  | 	if o.BlockSize == 0 { | ||||||
|  | 		o.BlockSize = BlobDefaultDownloadBlockSize | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if count == CountToEnd { // If size not specified, calculate it
 | ||||||
|  | 		if initialDownloadResponse != nil { | ||||||
|  | 			count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
 | ||||||
|  | 		} else { | ||||||
|  | 			// If we don't have the length at all, get it
 | ||||||
|  | 			dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			count = dr.ContentLength() - offset | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Prepare and do parallel download.
 | ||||||
|  | 	progress := int64(0) | ||||||
|  | 	progressLock := &sync.Mutex{} | ||||||
|  | 
 | ||||||
|  | 	err := DoBatchTransfer(ctx, BatchTransferOptions{ | ||||||
|  | 		OperationName: "downloadBlobToBuffer", | ||||||
|  | 		TransferSize:  count, | ||||||
|  | 		ChunkSize:     o.BlockSize, | ||||||
|  | 		Parallelism:   o.Parallelism, | ||||||
|  | 		Operation: func(chunkStart int64, count int64, ctx context.Context) error { | ||||||
|  | 			dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			body := dr.Body(o.RetryReaderOptionsPerBlock) | ||||||
|  | 			if o.Progress != nil { | ||||||
|  | 				rangeProgress := int64(0) | ||||||
|  | 				body = pipeline.NewResponseBodyProgress( | ||||||
|  | 					body, | ||||||
|  | 					func(bytesTransferred int64) { | ||||||
|  | 						diff := bytesTransferred - rangeProgress | ||||||
|  | 						rangeProgress = bytesTransferred | ||||||
|  | 						progressLock.Lock() | ||||||
|  | 						progress += diff | ||||||
|  | 						o.Progress(progress) | ||||||
|  | 						progressLock.Unlock() | ||||||
|  | 					}) | ||||||
|  | 			} | ||||||
|  | 			_, err = io.ReadFull(body, b[chunkStart:chunkStart+count]) | ||||||
|  | 			body.Close() | ||||||
|  | 			return err | ||||||
|  | 		}, | ||||||
|  | 	}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
 | ||||||
|  | // Offset and count are optional, pass 0 for both to download the entire blob.
 | ||||||
|  | func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, | ||||||
|  | 	b []byte, o DownloadFromBlobOptions) error { | ||||||
|  | 	return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DownloadBlobToFile downloads an Azure blob to a local file.
 | ||||||
|  | // The file is truncated if its size doesn't match the blob's size.
 | ||||||
|  | // Offset and count are optional, pass 0 for both to download the entire blob.
 | ||||||
|  | func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64, | ||||||
|  | 	file *os.File, o DownloadFromBlobOptions) error { | ||||||
|  | 	// 1. Calculate the size of the destination file
 | ||||||
|  | 	var size int64 | ||||||
|  | 
 | ||||||
|  | 	if count == CountToEnd { | ||||||
|  | 		// Try to get Azure blob's size
 | ||||||
|  | 		props, err := blobURL.GetProperties(ctx, o.AccessConditions) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		size = props.ContentLength() - offset | ||||||
|  | 	} else { | ||||||
|  | 		size = count | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
 | ||||||
|  | 	stat, err := file.Stat() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if stat.Size() != size { | ||||||
|  | 		if err = file.Truncate(size); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if size > 0 { | ||||||
|  | 		// 3. Set mmap and call downloadBlobToBuffer.
 | ||||||
|  | 		m, err := newMMF(file, true, 0, int(size)) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		defer m.unmap() | ||||||
|  | 		return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil) | ||||||
|  | 	} else { // if the blob's size is 0, there is no need to download it
 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | } | ||||||
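|  |  | ||||||
|  | // A minimal usage sketch downloading an entire blob (blobURL is hypothetical): | ||||||
|  | //	f, _ := os.Create("data.bin") | ||||||
|  | //	defer f.Close() | ||||||
|  | //	err := DownloadBlobToFile(ctx, blobURL, 0, CountToEnd, f, DownloadFromBlobOptions{}) | ||||||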
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | // BatchTransferOptions identifies options used by DoBatchTransfer.
 | ||||||
|  | type BatchTransferOptions struct { | ||||||
|  | 	TransferSize  int64 | ||||||
|  | 	ChunkSize     int64 | ||||||
|  | 	Parallelism   uint16 | ||||||
|  | 	Operation     func(offset int64, chunkSize int64, ctx context.Context) error | ||||||
|  | 	OperationName string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // DoBatchTransfer helps to execute operations in a batch manner.
 | ||||||
|  | // It can be used to customize batch transfers (for scenarios that the SDK does not cover out of the box).
 | ||||||
|  | func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { | ||||||
|  | 	if o.ChunkSize == 0 { | ||||||
|  | 		return errors.New("ChunkSize cannot be 0") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Prepare and do parallel operations.
 | ||||||
|  | 	numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) | ||||||
|  | 	operationChannel := make(chan func() error, o.Parallelism) // Channel from which the 'Parallelism' goroutines pull operations
 | ||||||
|  | 	operationResponseChannel := make(chan error, numChunks)    // Holds each response
 | ||||||
|  | 	ctx, cancel := context.WithCancel(ctx) | ||||||
|  | 	defer cancel() | ||||||
|  | 
 | ||||||
|  | 	// Create the goroutines that process each operation (in parallel).
 | ||||||
|  | 	if o.Parallelism == 0 { | ||||||
|  | 		o.Parallelism = 5 // default Parallelism
 | ||||||
|  | 	} | ||||||
|  | 	for g := uint16(0); g < o.Parallelism; g++ { | ||||||
|  | 		//grIndex := g
 | ||||||
|  | 		go func() { | ||||||
|  | 			for f := range operationChannel { | ||||||
|  | 				err := f() | ||||||
|  | 				operationResponseChannel <- err | ||||||
|  | 			} | ||||||
|  | 		}() | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Add each chunk's operation to the channel.
 | ||||||
|  | 	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { | ||||||
|  | 		curChunkSize := o.ChunkSize | ||||||
|  | 
 | ||||||
|  | 		if chunkNum == numChunks-1 { // Last chunk
 | ||||||
|  | 			curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
 | ||||||
|  | 		} | ||||||
|  | 		offset := int64(chunkNum) * o.ChunkSize | ||||||
|  | 
 | ||||||
|  | 		operationChannel <- func() error { | ||||||
|  | 			return o.Operation(offset, curChunkSize, ctx) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	close(operationChannel) | ||||||
|  | 
 | ||||||
|  | 	// Wait for the operations to complete.
 | ||||||
|  | 	var firstErr error = nil | ||||||
|  | 	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { | ||||||
|  | 		responseError := <-operationResponseChannel | ||||||
|  | 		// record the first error (the original error which should cause the other chunks to fail with canceled context)
 | ||||||
|  | 		if responseError != nil && firstErr == nil { | ||||||
|  | 			cancel() // As soon as any operation fails, cancel all remaining operation calls
 | ||||||
|  | 			firstErr = responseError | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return firstErr | ||||||
|  | } | ||||||
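|  |  | ||||||
|  | // A minimal usage sketch: process a buffer in 1 MiB chunks with 4 goroutines | ||||||
|  | // (buf and process are hypothetical): | ||||||
|  | //	err := DoBatchTransfer(ctx, BatchTransferOptions{ | ||||||
|  | //		OperationName: "example", | ||||||
|  | //		TransferSize:  int64(len(buf)), | ||||||
|  | //		ChunkSize:     1024 * 1024, | ||||||
|  | //		Parallelism:   4, | ||||||
|  | //		Operation: func(offset int64, count int64, ctx context.Context) error { | ||||||
|  | //			return process(buf[offset : offset+count]) | ||||||
|  | //		}, | ||||||
|  | //	}) | ||||||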
|  | 
 | ||||||
|  | ////////////////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | type UploadStreamToBlockBlobOptions struct { | ||||||
|  | 	BufferSize       int | ||||||
|  | 	MaxBuffers       int | ||||||
|  | 	BlobHTTPHeaders  BlobHTTPHeaders | ||||||
|  | 	Metadata         Metadata | ||||||
|  | 	AccessConditions BlobAccessConditions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, | ||||||
|  | 	o UploadStreamToBlockBlobOptions) (CommonResponse, error) { | ||||||
|  | 	result, err := uploadStream(ctx, reader, | ||||||
|  | 		UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers}, | ||||||
|  | 		&uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()}) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return result.(CommonResponse), nil | ||||||
|  | } | ||||||
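|  |  | ||||||
|  | // A minimal usage sketch (src and blockBlobURL are hypothetical); BufferSize times | ||||||
|  | // MaxBuffers bounds the memory held in flight: | ||||||
|  | //	_, err := UploadStreamToBlockBlob(ctx, src, blockBlobURL, | ||||||
|  | //		UploadStreamToBlockBlobOptions{BufferSize: 2 * 1024 * 1024, MaxBuffers: 3}) | ||||||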
|  | 
 | ||||||
|  | type uploadStreamToBlockBlobOptions struct { | ||||||
|  | 	b             BlockBlobURL | ||||||
|  | 	o             UploadStreamToBlockBlobOptions | ||||||
|  | 	blockIDPrefix uuid   // UUID used with all blockIDs
 | ||||||
|  | 	maxBlockNum   uint32 // defaults to 0
 | ||||||
|  | 	firstBlock    []byte // Used only if maxBlockNum is 0
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) { | ||||||
|  | 	return nil, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error { | ||||||
|  | 	if num == 0 { | ||||||
|  | 		t.firstBlock = buffer | ||||||
|  | 
 | ||||||
|  | 		// If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation
 | ||||||
|  | 		// If the payload is exactly the same size as the buffer, there may be more content coming in.
 | ||||||
|  | 		if len(buffer) < t.o.BufferSize { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// Else, upload a staged block...
 | ||||||
|  | 	atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) { | ||||||
|  | 		// Atomically remember (in t.maxBlockNum) the maximum block num we've ever seen
 | ||||||
|  | 		if startVal < num { | ||||||
|  | 			return num, nil | ||||||
|  | 		} | ||||||
|  | 		return startVal, nil | ||||||
|  | 	}) | ||||||
|  | 	blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64() | ||||||
|  | 	_, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil) | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) { | ||||||
|  | 	// If the first block had the exact same size as the buffer
 | ||||||
|  | 	// we would have staged it as a block thinking that there might be more data coming
 | ||||||
|  | 	if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize { | ||||||
|  | 		// If whole payload fits in 1 block (block #0), upload it with 1 I/O operation
 | ||||||
|  | 		return t.b.Upload(ctx, bytes.NewReader(t.firstBlock), | ||||||
|  | 			t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) | ||||||
|  | 	} | ||||||
|  | 	// Multiple blocks staged, commit them all now
 | ||||||
|  | 	blockID := newUuidBlockID(t.blockIDPrefix) | ||||||
|  | 	blockIDs := make([]string, t.maxBlockNum+1) | ||||||
|  | 	for bn := uint32(0); bn <= t.maxBlockNum; bn++ { | ||||||
|  | 		blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() | ||||||
|  | 	} | ||||||
|  | 	return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ////////////////////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | type iTransfer interface { | ||||||
|  | 	start(ctx context.Context) (interface{}, error) | ||||||
|  | 	chunk(ctx context.Context, num uint32, buffer []byte) error | ||||||
|  | 	end(ctx context.Context) (interface{}, error) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type UploadStreamOptions struct { | ||||||
|  | 	MaxBuffers int | ||||||
|  | 	BufferSize int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type firstErr struct { | ||||||
|  | 	lock       sync.Mutex | ||||||
|  | 	finalError error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (fe *firstErr) set(err error) { | ||||||
|  | 	fe.lock.Lock() | ||||||
|  | 	if fe.finalError == nil { | ||||||
|  | 		fe.finalError = err | ||||||
|  | 	} | ||||||
|  | 	fe.lock.Unlock() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (fe *firstErr) get() (err error) { | ||||||
|  | 	fe.lock.Lock() | ||||||
|  | 	err = fe.finalError | ||||||
|  | 	fe.lock.Unlock() | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) { | ||||||
|  | 	firstErr := firstErr{} | ||||||
|  | 	ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything
 | ||||||
|  | 	defer cancel() | ||||||
|  | 	wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing
 | ||||||
|  | 	type OutgoingMsg struct { | ||||||
|  | 		chunkNum uint32 | ||||||
|  | 		buffer   []byte | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Create a channel to hold the buffers usable for incoming data
 | ||||||
|  | 	incoming := make(chan []byte, o.MaxBuffers) | ||||||
|  | 	outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers
 | ||||||
|  | 	if result, err := t.start(ctx); err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	numBuffers := 0 // The number of buffers & outgoing goroutines created so far
 | ||||||
|  | 	injectBuffer := func() { | ||||||
|  | 		// For each Buffer, create it and a goroutine to upload it
 | ||||||
|  | 		incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can read from the reader into it
 | ||||||
|  | 		numBuffers++ | ||||||
|  | 		go func() { | ||||||
|  | 			for outgoingMsg := range outgoing { | ||||||
|  | 				// Upload the outgoing buffer
 | ||||||
|  | 				err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer) | ||||||
|  | 				wg.Done() // Indicate this buffer was sent
 | ||||||
|  | 				if nil != err { | ||||||
|  | 					// NOTE: firstErr.set may be called multiple times here, which is OK;
 | ||||||
|  | 					// only the first error recorded is eventually returned.
 | ||||||
|  | 					firstErr.set(err) | ||||||
|  | 					cancel() | ||||||
|  | 				} | ||||||
|  | 				incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now
 | ||||||
|  | 			} | ||||||
|  | 		}() | ||||||
|  | 	} | ||||||
|  | 	injectBuffer() // Create our 1st buffer & outgoing goroutine
 | ||||||
|  | 
 | ||||||
|  | 	// This loop grabs a buffer, reads from the stream into the buffer,
 | ||||||
|  | 	// and inserts the buffer into the outgoing channel to be uploaded.
 | ||||||
|  | 	for c := uint32(0); true; c++ { // Iterate once per chunk
 | ||||||
|  | 		var buffer []byte | ||||||
|  | 		if numBuffers < o.MaxBuffers { | ||||||
|  | 			select { | ||||||
|  | 			// We're not at max buffers, see if a previously-created buffer is available
 | ||||||
|  | 			case buffer = <-incoming: | ||||||
|  | 				break | ||||||
|  | 			default: | ||||||
|  | 				// No buffer available; inject a new buffer & goroutine to process it
 | ||||||
|  | 				injectBuffer() | ||||||
|  | 				buffer = <-incoming // Grab the just-injected buffer
 | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			// We are at max buffers, block until we get to reuse one
 | ||||||
|  | 			buffer = <-incoming | ||||||
|  | 		} | ||||||
|  | 		n, err := io.ReadFull(reader, buffer) | ||||||
|  | 		if err != nil { // Less than len(buffer) bytes were read
 | ||||||
|  | 			buffer = buffer[:n] // Make slice match the # of read bytes
 | ||||||
|  | 		} | ||||||
|  | 		if len(buffer) > 0 { | ||||||
|  | 			// Buffer not empty, upload it
 | ||||||
|  | 			wg.Add(1) // We're posting a buffer to be sent
 | ||||||
|  | 			outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer} | ||||||
|  | 		} | ||||||
|  | 		if err != nil { // The reader is done, no more outgoing buffers
 | ||||||
|  | 			if err == io.EOF || err == io.ErrUnexpectedEOF { | ||||||
|  | 				err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF
 | ||||||
|  | 			} else { | ||||||
|  | 				firstErr.set(err) | ||||||
|  | 			} | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done
 | ||||||
|  | 	close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty
 | ||||||
|  | 	wg.Wait()       // Wait for all pending outgoing messages to complete
 | ||||||
|  | 	err := firstErr.get() | ||||||
|  | 	if err == nil { | ||||||
|  | 		// If no error, after all blocks uploaded, commit them to the blob & return the result
 | ||||||
|  | 		return t.end(ctx) | ||||||
|  | 	} | ||||||
|  | 	return nil, err | ||||||
|  | } | ||||||
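Aside: the start/chunk/end machinery above is normally driven through the package's public helper rather than called directly. A minimal sketch, assuming the azblob v0.8.0 API (the account name, key, container, blob, and file names below are placeholders):

package main

import (
	"context"
	"log"
	"net/url"
	"os"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder endpoint and credentials.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
	cred, err := azblob.NewSharedKeyCredential("myaccount", "BASE64-ACCOUNT-KEY")
	if err != nil {
		log.Fatal(err)
	}
	blobURL := azblob.NewBlockBlobURL(*u, azblob.NewPipeline(cred, azblob.PipelineOptions{}))

	f, err := os.Open("payload.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Each BufferSize chunk is staged with StageBlock and committed with
	// CommitBlockList, unless the whole payload fits in a single buffer,
	// in which case end() issues one Upload call (see above).
	_, err = azblob.UploadStreamToBlockBlob(context.Background(), f, blobURL,
		azblob.UploadStreamToBlockBlobOptions{BufferSize: 4 * 1024 * 1024, MaxBuffers: 16})
	if err != nil {
		log.Fatal(err)
	}
}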
							
								
								
									
153 vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go generated vendored Normal file
							|  | @ -0,0 +1,153 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"net" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	snapshot           = "snapshot" | ||||||
|  | 	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
 | ||||||
|  | // existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
 | ||||||
|  | // NOTE: Changing any SAS-related field requires computing a new SAS signature.
 | ||||||
|  | type BlobURLParts struct { | ||||||
|  | 	Scheme              string // Ex: "https://"
 | ||||||
|  | 	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
 | ||||||
|  | 	IPEndpointStyleInfo IPEndpointStyleInfo | ||||||
|  | 	ContainerName       string // "" if no container
 | ||||||
|  | 	BlobName            string // "" if no blob
 | ||||||
|  | 	Snapshot            string // "" if not a snapshot
 | ||||||
|  | 	SAS                 SASQueryParameters | ||||||
|  | 	UnparsedParams      string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
 | ||||||
|  | // Ex: "https://10.132.141.33/accountname/containername"
 | ||||||
|  | type IPEndpointStyleInfo struct { | ||||||
|  | 	AccountName string // "" if not using IP endpoint style
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // isIPEndpointStyle checks whether the URL's host is an IP address; in that case the storage account endpoint is composed as:
 | ||||||
|  | // http(s)://IP(:port)/storageaccount/container/...
 | ||||||
|  | // Like url.URL's Host field, host may be either "host" or "host:port".
 | ||||||
|  | func isIPEndpointStyle(host string) bool { | ||||||
|  | 	if host == "" { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	if h, _, err := net.SplitHostPort(host); err == nil { | ||||||
|  | 		host = h | ||||||
|  | 	} | ||||||
|  | 	// For IPv6, SplitHostPort can fail because no port is present.
 | ||||||
|  | 	// In this case, eliminate the '[' and ']' in the URL.
 | ||||||
|  | 	// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
 | ||||||
|  | 	if host[0] == '[' && host[len(host)-1] == ']' { | ||||||
|  | 		host = host[1 : len(host)-1] | ||||||
|  | 	} | ||||||
|  | 	return net.ParseIP(host) != nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBlobURLParts parses a URL, initializing the BlobURLParts fields, including any SAS-related & snapshot query parameters. Any other
 | ||||||
|  | // query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
 | ||||||
|  | func NewBlobURLParts(u url.URL) BlobURLParts { | ||||||
|  | 	up := BlobURLParts{ | ||||||
|  | 		Scheme: u.Scheme, | ||||||
|  | 		Host:   u.Host, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Find the container & blob names (if any)
 | ||||||
|  | 	if u.Path != "" { | ||||||
|  | 		path := u.Path | ||||||
|  | 		if path[0] == '/' { | ||||||
|  | 			path = path[1:] // If path starts with a slash, remove it
 | ||||||
|  | 		} | ||||||
|  | 		if isIPEndpointStyle(up.Host) { | ||||||
|  | 			if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
 | ||||||
|  | 				up.IPEndpointStyleInfo.AccountName = path | ||||||
|  | 			} else { | ||||||
|  | 				up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
 | ||||||
|  | 				path = path[accountEndIndex+1:]                             // path refers to portion after the account name now (container & blob names)
 | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
 | ||||||
|  | 		if containerEndIndex == -1 {                  // Slash not found; path has container name & no blob name
 | ||||||
|  | 			up.ContainerName = path | ||||||
|  | 		} else { | ||||||
|  | 			up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
 | ||||||
|  | 			up.BlobName = path[containerEndIndex+1:]    // The blob name is after the container slash
 | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Convert the query parameters to a case-sensitive map & trim whitespace
 | ||||||
|  | 	paramsMap := u.Query() | ||||||
|  | 
 | ||||||
|  | 	up.Snapshot = "" // Assume no snapshot
 | ||||||
|  | 	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { | ||||||
|  | 		up.Snapshot = snapshotStr[0] | ||||||
|  | 		// If we recognized the query parameter, remove it from the map
 | ||||||
|  | 		delete(paramsMap, snapshot) | ||||||
|  | 	} | ||||||
|  | 	up.SAS = newSASQueryParameters(paramsMap, true) | ||||||
|  | 	up.UnparsedParams = paramsMap.Encode() | ||||||
|  | 	return up | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type caseInsensitiveValues url.Values // map[string][]string
 | ||||||
|  | func (values caseInsensitiveValues) Get(key string) ([]string, bool) { | ||||||
|  | 	key = strings.ToLower(key) | ||||||
|  | 	for k, v := range values { | ||||||
|  | 		if strings.ToLower(k) == key { | ||||||
|  | 			return v, true | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return []string{}, false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
 | ||||||
|  | // field contains the SAS, snapshot, and unparsed query parameters.
 | ||||||
|  | func (up BlobURLParts) URL() url.URL { | ||||||
|  | 	path := "" | ||||||
|  | 	if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { | ||||||
|  | 		path += "/" + up.IPEndpointStyleInfo.AccountName | ||||||
|  | 	} | ||||||
|  | 	// Concatenate container & blob names (if they exist)
 | ||||||
|  | 	if up.ContainerName != "" { | ||||||
|  | 		path += "/" + up.ContainerName | ||||||
|  | 		if up.BlobName != "" { | ||||||
|  | 			path += "/" + up.BlobName | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	rawQuery := up.UnparsedParams | ||||||
|  | 
 | ||||||
|  | 	//If no snapshot is initially provided, fill it in from the SAS query properties to help the user
 | ||||||
|  | 	if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { | ||||||
|  | 		up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Concatenate blob snapshot query parameter (if it exists)
 | ||||||
|  | 	if up.Snapshot != "" { | ||||||
|  | 		if len(rawQuery) > 0 { | ||||||
|  | 			rawQuery += "&" | ||||||
|  | 		} | ||||||
|  | 		rawQuery += snapshot + "=" + up.Snapshot | ||||||
|  | 	} | ||||||
|  | 	sas := up.SAS.Encode() | ||||||
|  | 	if sas != "" { | ||||||
|  | 		if len(rawQuery) > 0 { | ||||||
|  | 			rawQuery += "&" | ||||||
|  | 		} | ||||||
|  | 		rawQuery += sas | ||||||
|  | 	} | ||||||
|  | 	u := url.URL{ | ||||||
|  | 		Scheme:   up.Scheme, | ||||||
|  | 		Host:     up.Host, | ||||||
|  | 		Path:     path, | ||||||
|  | 		RawQuery: rawQuery, | ||||||
|  | 	} | ||||||
|  | 	return u | ||||||
|  | } | ||||||
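A small sketch of round-tripping a blob URL through NewBlobURLParts and URL() (the example URL and snapshot value are hypothetical):

package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	raw := "https://myaccount.blob.core.windows.net/mycontainer/dir/myblob?snapshot=2019-01-01T00:00:00.0000000Z"
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	parts := azblob.NewBlobURLParts(*u)
	fmt.Println(parts.ContainerName) // "mycontainer"
	fmt.Println(parts.BlobName)      // "dir/myblob"
	fmt.Println(parts.Snapshot)      // "2019-01-01T00:00:00.0000000Z"

	parts.Snapshot = "" // point back at the base blob
	base := parts.URL()
	fmt.Println(base.String())
}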
							
								
								
									
256 vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go generated vendored Normal file
							|  | @ -0,0 +1,256 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
 | ||||||
|  | type BlobSASSignatureValues struct { | ||||||
|  | 	Version            string      `param:"sv"`  // If not specified, this defaults to SASVersion
 | ||||||
|  | 	Protocol           SASProtocol `param:"spr"` // See the SASProtocol* constants
 | ||||||
|  | 	StartTime          time.Time   `param:"st"`  // Not specified if IsZero
 | ||||||
|  | 	ExpiryTime         time.Time   `param:"se"`  // Not specified if IsZero
 | ||||||
|  | 	SnapshotTime       time.Time | ||||||
|  | 	Permissions        string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
 | ||||||
|  | 	IPRange            IPRange `param:"sip"` | ||||||
|  | 	Identifier         string  `param:"si"` | ||||||
|  | 	ContainerName      string | ||||||
|  | 	BlobName           string // Use "" to create a Container SAS
 | ||||||
|  | 	CacheControl       string // rscc
 | ||||||
|  | 	ContentDisposition string // rscd
 | ||||||
|  | 	ContentEncoding    string // rsce
 | ||||||
|  | 	ContentLanguage    string // rscl
 | ||||||
|  | 	ContentType        string // rsct
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce
 | ||||||
|  | // the proper SAS query parameters.
 | ||||||
|  | // See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
 | ||||||
|  | func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) { | ||||||
|  | 	resource := "c" | ||||||
|  | 	if credential == nil { | ||||||
|  | 		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if !v.SnapshotTime.IsZero() { | ||||||
|  | 		resource = "bs" | ||||||
|  | 		//Make sure the permission characters are in the correct order
 | ||||||
|  | 		perms := &BlobSASPermissions{} | ||||||
|  | 		if err := perms.Parse(v.Permissions); err != nil { | ||||||
|  | 			return SASQueryParameters{}, err | ||||||
|  | 		} | ||||||
|  | 		v.Permissions = perms.String() | ||||||
|  | 	} else if v.BlobName == "" { | ||||||
|  | 		// Make sure the permission characters are in the correct order
 | ||||||
|  | 		perms := &ContainerSASPermissions{} | ||||||
|  | 		if err := perms.Parse(v.Permissions); err != nil { | ||||||
|  | 			return SASQueryParameters{}, err | ||||||
|  | 		} | ||||||
|  | 		v.Permissions = perms.String() | ||||||
|  | 	} else { | ||||||
|  | 		resource = "b" | ||||||
|  | 		// Make sure the permission characters are in the correct order
 | ||||||
|  | 		perms := &BlobSASPermissions{} | ||||||
|  | 		if err := perms.Parse(v.Permissions); err != nil { | ||||||
|  | 			return SASQueryParameters{}, err | ||||||
|  | 		} | ||||||
|  | 		v.Permissions = perms.String() | ||||||
|  | 	} | ||||||
|  | 	if v.Version == "" { | ||||||
|  | 		v.Version = SASVersion | ||||||
|  | 	} | ||||||
|  | 	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) | ||||||
|  | 
 | ||||||
|  | 	signedIdentifier := v.Identifier | ||||||
|  | 
 | ||||||
|  | 	udk := credential.getUDKParams() | ||||||
|  | 
 | ||||||
|  | 	if udk != nil { | ||||||
|  | 		udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{}) | ||||||
|  | 		// Combining these is awkward, but because signedIdentifier and the user
 | ||||||
|  | 		// delegation key strings share a slot in the string to sign, this is an acceptable way to do it.
 | ||||||
|  | 		signedIdentifier = strings.Join([]string{ | ||||||
|  | 			udk.SignedOid, | ||||||
|  | 			udk.SignedTid, | ||||||
|  | 			udkStart, | ||||||
|  | 			udkExpiry, | ||||||
|  | 			udk.SignedService, | ||||||
|  | 			udk.SignedVersion, | ||||||
|  | 		}, "\n") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
 | ||||||
|  | 	stringToSign := strings.Join([]string{ | ||||||
|  | 		v.Permissions, | ||||||
|  | 		startTime, | ||||||
|  | 		expiryTime, | ||||||
|  | 		getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName), | ||||||
|  | 		signedIdentifier, | ||||||
|  | 		v.IPRange.String(), | ||||||
|  | 		string(v.Protocol), | ||||||
|  | 		v.Version, | ||||||
|  | 		resource, | ||||||
|  | 		snapshotTime,         // signed timestamp
 | ||||||
|  | 		v.CacheControl,       // rscc
 | ||||||
|  | 		v.ContentDisposition, // rscd
 | ||||||
|  | 		v.ContentEncoding,    // rsce
 | ||||||
|  | 		v.ContentLanguage,    // rscl
 | ||||||
|  | 		v.ContentType},       // rsct
 | ||||||
|  | 		"\n") | ||||||
|  | 
 | ||||||
|  | 	signature := credential.ComputeHMACSHA256(stringToSign) | ||||||
|  | 
 | ||||||
|  | 	p := SASQueryParameters{ | ||||||
|  | 		// Common SAS parameters
 | ||||||
|  | 		version:     v.Version, | ||||||
|  | 		protocol:    v.Protocol, | ||||||
|  | 		startTime:   v.StartTime, | ||||||
|  | 		expiryTime:  v.ExpiryTime, | ||||||
|  | 		permissions: v.Permissions, | ||||||
|  | 		ipRange:     v.IPRange, | ||||||
|  | 
 | ||||||
|  | 		// Container/Blob-specific SAS parameters
 | ||||||
|  | 		resource:           resource, | ||||||
|  | 		identifier:         v.Identifier, | ||||||
|  | 		cacheControl:       v.CacheControl, | ||||||
|  | 		contentDisposition: v.ContentDisposition, | ||||||
|  | 		contentEncoding:    v.ContentEncoding, | ||||||
|  | 		contentLanguage:    v.ContentLanguage, | ||||||
|  | 		contentType:        v.ContentType, | ||||||
|  | 		snapshotTime:       v.SnapshotTime, | ||||||
|  | 
 | ||||||
|  | 		// Calculated SAS signature
 | ||||||
|  | 		signature: signature, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	//User delegation SAS specific parameters
 | ||||||
|  | 	if udk != nil { | ||||||
|  | 		p.signedOid = udk.SignedOid | ||||||
|  | 		p.signedTid = udk.SignedTid | ||||||
|  | 		p.signedStart = udk.SignedStart | ||||||
|  | 		p.signedExpiry = udk.SignedExpiry | ||||||
|  | 		p.signedService = udk.SignedService | ||||||
|  | 		p.signedVersion = udk.SignedVersion | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return p, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
 | ||||||
|  | func getCanonicalName(account string, containerName string, blobName string) string { | ||||||
|  | 	// Container: "/blob/account/containername"
 | ||||||
|  | 	// Blob:      "/blob/account/containername/blobname"
 | ||||||
|  | 	elements := []string{"/blob/", account, "/", containerName} | ||||||
|  | 	if blobName != "" { | ||||||
|  | 		elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) | ||||||
|  | 	} | ||||||
|  | 	return strings.Join(elements, "") | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
 | ||||||
|  | type ContainerSASPermissions struct { | ||||||
|  | 	Read, Add, Create, Write, Delete, List bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String produces the SAS permissions string for an Azure Storage container.
 | ||||||
|  | // Call this method to set BlobSASSignatureValues's Permissions field.
 | ||||||
|  | func (p ContainerSASPermissions) String() string { | ||||||
|  | 	var b bytes.Buffer | ||||||
|  | 	if p.Read { | ||||||
|  | 		b.WriteRune('r') | ||||||
|  | 	} | ||||||
|  | 	if p.Add { | ||||||
|  | 		b.WriteRune('a') | ||||||
|  | 	} | ||||||
|  | 	if p.Create { | ||||||
|  | 		b.WriteRune('c') | ||||||
|  | 	} | ||||||
|  | 	if p.Write { | ||||||
|  | 		b.WriteRune('w') | ||||||
|  | 	} | ||||||
|  | 	if p.Delete { | ||||||
|  | 		b.WriteRune('d') | ||||||
|  | 	} | ||||||
|  | 	if p.List { | ||||||
|  | 		b.WriteRune('l') | ||||||
|  | 	} | ||||||
|  | 	return b.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the ContainerSASPermissions's fields from a string.
 | ||||||
|  | func (p *ContainerSASPermissions) Parse(s string) error { | ||||||
|  | 	*p = ContainerSASPermissions{} // Clear the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 'r': | ||||||
|  | 			p.Read = true | ||||||
|  | 		case 'a': | ||||||
|  | 			p.Add = true | ||||||
|  | 		case 'c': | ||||||
|  | 			p.Create = true | ||||||
|  | 		case 'w': | ||||||
|  | 			p.Write = true | ||||||
|  | 		case 'd': | ||||||
|  | 			p.Delete = true | ||||||
|  | 		case 'l': | ||||||
|  | 			p.List = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("Invalid permission: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
 | ||||||
|  | type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool } | ||||||
|  | 
 | ||||||
|  | // String produces the SAS permissions string for an Azure Storage blob.
 | ||||||
|  | // Call this method to set BlobSASSignatureValues's Permissions field.
 | ||||||
|  | func (p BlobSASPermissions) String() string { | ||||||
|  | 	var b bytes.Buffer | ||||||
|  | 	if p.Read { | ||||||
|  | 		b.WriteRune('r') | ||||||
|  | 	} | ||||||
|  | 	if p.Add { | ||||||
|  | 		b.WriteRune('a') | ||||||
|  | 	} | ||||||
|  | 	if p.Create { | ||||||
|  | 		b.WriteRune('c') | ||||||
|  | 	} | ||||||
|  | 	if p.Write { | ||||||
|  | 		b.WriteRune('w') | ||||||
|  | 	} | ||||||
|  | 	if p.Delete { | ||||||
|  | 		b.WriteRune('d') | ||||||
|  | 	} | ||||||
|  | 	return b.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the BlobSASPermissions's fields from a string.
 | ||||||
|  | func (p *BlobSASPermissions) Parse(s string) error { | ||||||
|  | 	*p = BlobSASPermissions{} // Clear the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 'r': | ||||||
|  | 			p.Read = true | ||||||
|  | 		case 'a': | ||||||
|  | 			p.Add = true | ||||||
|  | 		case 'c': | ||||||
|  | 			p.Create = true | ||||||
|  | 		case 'w': | ||||||
|  | 			p.Write = true | ||||||
|  | 		case 'd': | ||||||
|  | 			p.Delete = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("Invalid permission: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
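A sketch of generating a read-only blob SAS with these types, assuming v0.8.0's NewSharedKeyCredential signature (account, key, container, and blob names are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	cred, err := azblob.NewSharedKeyCredential("myaccount", "BASE64-ACCOUNT-KEY")
	if err != nil {
		panic(err)
	}
	sasQueryParams, err := azblob.BlobSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		ContainerName: "mycontainer",
		BlobName:      "myblob",
		// Parse/String normalizes the permission order, just as
		// NewSASQueryParameters does internally (see above).
		Permissions: azblob.BlobSASPermissions{Read: true}.String(),
	}.NewSASQueryParameters(cred)
	if err != nil {
		panic(err)
	}
	fmt.Println(sasQueryParams.Encode()) // query string to append to the blob URL
}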
							
								
								
									
195 vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go generated vendored Normal file
							|  | @ -0,0 +1,195 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
 | ||||||
|  | 
 | ||||||
|  | // ServiceCode values indicate a service failure.
 | ||||||
|  | const ( | ||||||
|  | 	// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
 | ||||||
|  | 	ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobAlreadyExists means the specified blob already exists.
 | ||||||
|  | 	ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobNotFound means the specified blob does not exist.
 | ||||||
|  | 	ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
 | ||||||
|  | 	ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
 | ||||||
|  | 	ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
 | ||||||
|  | 	// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
 | ||||||
|  | 	ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
 | ||||||
|  | 	ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
 | ||||||
|  | 	ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
 | ||||||
|  | 	// Examine the HTTP status code and message for more information about the failure.
 | ||||||
|  | 	ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeContainerAlreadyExists means the specified container already exists.
 | ||||||
|  | 	ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
 | ||||||
|  | 	ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
 | ||||||
|  | 	ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeContainerNotFound means the specified container does not exist.
 | ||||||
|  | 	ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
 | ||||||
|  | 	ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
 | ||||||
|  | 	ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
 | ||||||
|  | 	ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
 | ||||||
|  | 	// that the operation for AppendBlob requires at least version 2015-02-21.
 | ||||||
|  | 	ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
 | ||||||
|  | 	ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
 | ||||||
|  | 	ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for an incremental copy request must be a snapshot.
 | ||||||
|  | 	ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
 | ||||||
|  | 	ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
 | ||||||
|  | 	ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
 | ||||||
|  | 	ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
 | ||||||
|  | 	ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidBlockList means the specified block list is invalid.
 | ||||||
|  | 	ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidOperation means an invalid operation was attempted against a blob snapshot.
 | ||||||
|  | 	ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidPageRange means the page range specified is invalid.
 | ||||||
|  | 	ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
 | ||||||
|  | 	ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidSourceBlobURL means the source URL for an incremental copy request must be a valid Azure Storage blob URL.
 | ||||||
|  | 	ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
 | ||||||
|  | 	ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
 | ||||||
|  | 	ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
 | ||||||
|  | 	ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
 | ||||||
|  | 	ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
 | ||||||
|  | 	ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
 | ||||||
|  | 	ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
 | ||||||
|  | 	ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
 | ||||||
|  | 	ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
 | ||||||
|  | 	ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
 | ||||||
|  | 	ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
 | ||||||
|  | 	ServiceCodeLeaseLost ServiceCodeType = "LeaseLost" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
 | ||||||
|  | 	ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
 | ||||||
|  | 	ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
 | ||||||
|  | 	ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
 | ||||||
|  | 	ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
 | ||||||
|  | 	ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
 | ||||||
|  | 	ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
 | ||||||
|  | 	ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
 | ||||||
|  | 	ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
 | ||||||
|  | 	ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
 | ||||||
|  | 	ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
 | ||||||
|  | 	ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
 | ||||||
|  | 	ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
 | ||||||
|  | 	ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
 | ||||||
|  | 	ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
 | ||||||
|  | 	ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
 | ||||||
|  | 	ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeSystemInUse means this blob is in use by the system.
 | ||||||
|  | 	ServiceCodeSystemInUse ServiceCodeType = "SystemInUse" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
 | ||||||
|  | 	ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
 | ||||||
|  | 	ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
 | ||||||
|  | 	ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
 | ||||||
|  | 	ServiceCodeBlobArchived ServiceCodeType = "BlobArchived" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
 | ||||||
|  | 	ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived" | ||||||
|  | ) | ||||||
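These constants are typically compared against the ServiceCode of a returned azblob.StorageError; a minimal sketch (the helper name is hypothetical):

package blobutil

import "github.com/Azure/azure-storage-blob-go/azblob"

// isBlobNotFound reports whether err is the service signaling a missing blob.
func isBlobNotFound(err error) bool {
	if serr, ok := err.(azblob.StorageError); ok {
		return serr.ServiceCode() == azblob.ServiceCodeBlobNotFound
	}
	return false
}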
							
								
								
									
8 vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go generated vendored Normal file
							|  | @ -0,0 +1,8 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
 | ||||||
|  | type StorageAccountCredential interface { | ||||||
|  | 	AccountName() string | ||||||
|  | 	ComputeHMACSHA256(message string) (base64String string) | ||||||
|  | 	getUDKParams() *UserDelegationKey | ||||||
|  | } | ||||||
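Because both SharedKeyCredential and UserDelegationCredential satisfy this interface, SAS-signing code can accept either. A sketch (the helper name is hypothetical; NewSASQueryParameters is the real entry point shown earlier):

package blobutil

import "github.com/Azure/azure-storage-blob-go/azblob"

// signSAS works with any credential type that satisfies
// StorageAccountCredential: shared key or user delegation.
func signSAS(c azblob.StorageAccountCredential, v azblob.BlobSASSignatureValues) (azblob.SASQueryParameters, error) {
	return v.NewSASQueryParameters(c)
}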
							
								
								
									
128 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go generated vendored Normal file
							|  | @ -0,0 +1,128 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"io" | ||||||
|  | 	"net/url" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
 | ||||||
|  | 	AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
 | ||||||
|  | 
 | ||||||
|  | 	// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
 | ||||||
|  | 	AppendBlobMaxBlocks = 50000 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // AppendBlobURL defines a set of operations applicable to append blobs.
 | ||||||
|  | type AppendBlobURL struct { | ||||||
|  | 	BlobURL | ||||||
|  | 	abClient appendBlobClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL { | ||||||
|  | 	blobClient := newBlobClient(url, p) | ||||||
|  | 	abClient := newAppendBlobClient(url, p) | ||||||
|  | 	return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline.
 | ||||||
|  | func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL { | ||||||
|  | 	return NewAppendBlobURL(ab.blobClient.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
 | ||||||
|  | // Pass "" to remove the snapshot returning a URL to the base blob.
 | ||||||
|  | func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL { | ||||||
|  | 	p := NewBlobURLParts(ab.URL()) | ||||||
|  | 	p.Snapshot = snapshot | ||||||
|  | 	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
 | ||||||
|  | func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return ab.abClient.Create(ctx, 0, nil, | ||||||
|  | 		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, | ||||||
|  | 		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AppendBlock writes a stream of data to a new block at the end of the existing append blob.
 | ||||||
|  | // This method panics if the stream is not at position 0.
 | ||||||
|  | // Note that the http client closes the body stream after the request is sent to the service.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
 | ||||||
|  | func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers() | ||||||
|  | 	count, err := validateSeekableStreamAt0AndGetCount(body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return ab.abClient.AppendBlock(ctx, body, count, nil, | ||||||
|  | 		transactionalMD5, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
 | ||||||
|  | func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() | ||||||
|  | 	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() | ||||||
|  | 	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers() | ||||||
|  | 	return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(), | ||||||
|  | 		transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type AppendBlobAccessConditions struct { | ||||||
|  | 	ModifiedAccessConditions | ||||||
|  | 	LeaseAccessConditions | ||||||
|  | 	AppendPositionAccessConditions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
 | ||||||
|  | type AppendPositionAccessConditions struct { | ||||||
|  | 	// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
 | ||||||
|  | 	// only if the append position is equal to a value.
 | ||||||
|  | 	// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
 | ||||||
|  | 	// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value.
 | ||||||
|  | 	// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0.
 | ||||||
|  | 	IfAppendPositionEqual int64 | ||||||
|  | 
 | ||||||
|  | 	// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
 | ||||||
|  | 	// only if the append blob's size is less than or equal to a value.
 | ||||||
|  | 	// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
 | ||||||
|  | 	// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
 | ||||||
|  | 	// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
 | ||||||
|  | 	IfMaxSizeLessThanOrEqual int64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // pointers is for internal infrastructure. It returns the fields as pointers.
 | ||||||
|  | func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) { | ||||||
|  | 	var zero int64 // defaults to 0
 | ||||||
|  | 	switch ac.IfAppendPositionEqual { | ||||||
|  | 	case -1: | ||||||
|  | 		iape = &zero | ||||||
|  | 	case 0: | ||||||
|  | 		iape = nil | ||||||
|  | 	default: | ||||||
|  | 		iape = &ac.IfAppendPositionEqual | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch ac.IfMaxSizeLessThanOrEqual { | ||||||
|  | 	case -1: | ||||||
|  | 		imsltoe = &zero | ||||||
|  | 	case 0: | ||||||
|  | 		imsltoe = nil | ||||||
|  | 	default: | ||||||
|  | 		imsltoe = &ac.IfMaxSizeLessThanOrEqual | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
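A sketch of creating an append blob and appending one block through this API (URL, account, and key are placeholders; the body passed to AppendBlock must be an io.ReadSeeker at position 0):

package main

import (
	"bytes"
	"context"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/mylog")
	cred, err := azblob.NewSharedKeyCredential("myaccount", "BASE64-ACCOUNT-KEY")
	if err != nil {
		log.Fatal(err)
	}
	abURL := azblob.NewAppendBlobURL(*u, azblob.NewPipeline(cred, azblob.PipelineOptions{}))

	ctx := context.Background()
	if _, err = abURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
	// Each block may be at most AppendBlobMaxAppendBlockBytes (4MB).
	if _, err = abURL.AppendBlock(ctx, bytes.NewReader([]byte("hello\n")), azblob.AppendBlobAccessConditions{}, nil); err != nil {
		log.Fatal(err)
	}
}

216 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go generated vendored Normal file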
|  | @ -0,0 +1,216 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"net/url" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
 | ||||||
|  | type BlobURL struct { | ||||||
|  | 	blobClient blobClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL { | ||||||
|  | 	blobClient := newBlobClient(url, p) | ||||||
|  | 	return BlobURL{blobClient: blobClient} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // URL returns the URL endpoint used by the BlobURL object.
 | ||||||
|  | func (b BlobURL) URL() url.URL { | ||||||
|  | 	return b.blobClient.URL() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns the URL as a string.
 | ||||||
|  | func (b BlobURL) String() string { | ||||||
|  | 	u := b.URL() | ||||||
|  | 	return u.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
 | ||||||
|  | func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL { | ||||||
|  | 	return NewBlobURL(b.blobClient.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
 | ||||||
|  | // Pass "" to remove the snapshot returning a URL to the base blob.
 | ||||||
|  | func (b BlobURL) WithSnapshot(snapshot string) BlobURL { | ||||||
|  | 	p := NewBlobURLParts(b.URL()) | ||||||
|  | 	p.Snapshot = snapshot | ||||||
|  | 	return NewBlobURL(p.URL(), b.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
 | ||||||
|  | func (b BlobURL) ToAppendBlobURL() AppendBlobURL { | ||||||
|  | 	return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
 | ||||||
|  | func (b BlobURL) ToBlockBlobURL() BlockBlobURL { | ||||||
|  | 	return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
 | ||||||
|  | func (b BlobURL) ToPageBlobURL() PageBlobURL { | ||||||
|  | 	return NewPageBlobURL(b.URL(), b.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
 | ||||||
|  | // Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
 | ||||||
|  | func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) { | ||||||
|  | 	var xRangeGetContentMD5 *bool | ||||||
|  | 	if rangeGetContentMD5 { | ||||||
|  | 		xRangeGetContentMD5 = &rangeGetContentMD5 | ||||||
|  | 	} | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	dr, err := b.blobClient.Download(ctx, nil, nil, | ||||||
|  | 		httpRange{offset: offset, count: count}.pointers(), | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return &DownloadResponse{ | ||||||
|  | 		b:       b, | ||||||
|  | 		r:       dr, | ||||||
|  | 		ctx:     ctx, | ||||||
|  | 		getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()}, | ||||||
|  | 	}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
 | ||||||
|  | // Note that deleting a blob also deletes all its snapshots.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
 | ||||||
|  | func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
 | ||||||
|  | func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) { | ||||||
|  | 	return b.blobClient.Undelete(ctx, nil, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetTier sets the tier on a blob. The operation is allowed on a page
 | ||||||
|  | // blob in a premium storage account and on a block blob in a blob storage account (locally
 | ||||||
|  | // redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
 | ||||||
|  | // bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
 | ||||||
|  | // does not update the blob's ETag.
 | ||||||
|  | // For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
 | ||||||
|  | func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) { | ||||||
|  | 	return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetProperties returns the blob's properties.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
 | ||||||
|  | func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetHTTPHeaders changes a blob's HTTP headers.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
 | ||||||
|  | func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return b.blobClient.SetHTTPHeaders(ctx, nil, | ||||||
|  | 		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage, | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, | ||||||
|  | 		&h.ContentDisposition, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetMetadata changes a blob's metadata.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
 | ||||||
|  | func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CreateSnapshot creates a read-only snapshot of a blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
 | ||||||
|  | func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) { | ||||||
|  | 	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
 | ||||||
|  | 	// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
 | ||||||
|  | 	// performance hit.
 | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
 | ||||||
|  | // 15 and 60 seconds, or infinite (-1).
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
 | ||||||
|  | func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() | ||||||
|  | 	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RenewLease renews the blob's previously-acquired lease.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
 | ||||||
|  | func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() | ||||||
|  | 	return b.blobClient.RenewLease(ctx, leaseID, nil, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReleaseLease releases the blob's previously-acquired lease.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
 | ||||||
|  | func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() | ||||||
|  | 	return b.blobClient.ReleaseLease(ctx, leaseID, nil, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
 | ||||||
|  | // constant to break a fixed-duration lease when it expires or an infinite lease immediately.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
 | ||||||
|  | func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() | ||||||
|  | 	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ChangeLease changes the blob's lease ID.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
 | ||||||
|  | func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() | ||||||
|  | 	return b.blobClient.ChangeLease(ctx, leaseID, proposedID, | ||||||
|  | 		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
 | ||||||
|  | const LeaseBreakNaturally = -1 | ||||||
|  | 
 | ||||||
|  | func leasePeriodPointer(period int32) (p *int32) { | ||||||
|  | 	if period != LeaseBreakNaturally { | ||||||
|  | 		p = &period | ||||||
|  | 	} | ||||||
|  | 	return p | ||||||
|  | } | ||||||
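Taken together, these methods form the lease lifecycle: acquire, optionally renew, then release (or break). A minimal sketch, assuming `blobURL` is an already-constructed `azblob.BlobURL`; the proposed lease ID below is an arbitrary GUID:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// leaseLifecycle acquires a 30-second lease, renews it once, and releases it.
func leaseLifecycle(ctx context.Context, blobURL azblob.BlobURL) {
	// The proposed ID must be a GUID; this one is arbitrary.
	acq, err := blobURL.AcquireLease(ctx, "9a2e51a9-6c1f-4c4a-9a5c-caa1e1b0f9a1", 30,
		azblob.ModifiedAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
	leaseID := acq.LeaseID()

	// Renew before the 30 seconds elapse to keep holding the lease.
	if _, err := blobURL.RenewLease(ctx, leaseID, azblob.ModifiedAccessConditions{}); err != nil {
		log.Fatal(err)
	}

	// Release explicitly; BreakLease(ctx, azblob.LeaseBreakNaturally, ...) would instead
	// let a fixed-duration lease run out on the service's own schedule.
	if _, err := blobURL.ReleaseLease(ctx, leaseID, azblob.ModifiedAccessConditions{}); err != nil {
		log.Fatal(err)
	}
}
```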
|  | 
 | ||||||
|  | // StartCopyFromURL copies the data at the source URL to a blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
 | ||||||
|  | func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) { | ||||||
|  | 	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() | ||||||
|  | 	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() | ||||||
|  | 	dstLeaseID := dstac.LeaseAccessConditions.pointers() | ||||||
|  | 
 | ||||||
|  | 	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata, | ||||||
|  | 		srcIfModifiedSince, srcIfUnmodifiedSince, | ||||||
|  | 		srcIfMatchETag, srcIfNoneMatchETag, | ||||||
|  | 		dstIfModifiedSince, dstIfUnmodifiedSince, | ||||||
|  | 		dstIfMatchETag, dstIfNoneMatchETag, | ||||||
|  | 		dstLeaseID, nil) | ||||||
|  | } | ||||||
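StartCopyFromURL is asynchronous: it returns once the copy is scheduled, and progress is surfaced through the destination blob's properties. A minimal polling sketch (the two-second interval is illustrative; real code would back off and bound the wait):

```go
package main

import (
	"context"
	"log"
	"net/url"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// copyAndWait starts a server-side copy and polls until it leaves the pending state.
func copyAndWait(ctx context.Context, dst azblob.BlobURL, src url.URL) {
	resp, err := dst.StartCopyFromURL(ctx, src, azblob.Metadata{},
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
	status := resp.CopyStatus()
	for status == azblob.CopyStatusPending {
		time.Sleep(2 * time.Second)
		props, err := dst.GetProperties(ctx, azblob.BlobAccessConditions{})
		if err != nil {
			log.Fatal(err)
		}
		status = props.CopyStatus()
	}
	log.Println("copy finished with status:", status)
}
```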
|  | 
 | ||||||
|  | // AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with zero length and full metadata.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
 | ||||||
|  | func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) { | ||||||
|  | 	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil) | ||||||
|  | } | ||||||
162 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go generated vendored Normal file
|  | @ -0,0 +1,162 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"io" | ||||||
|  | 	"net/url" | ||||||
|  | 
 | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"encoding/binary" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
 | ||||||
|  | 	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
 | ||||||
|  | 
 | ||||||
|  | 	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
 | ||||||
|  | 	BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
 | ||||||
|  | 
 | ||||||
|  | 	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
 | ||||||
|  | 	BlockBlobMaxBlocks = 50000 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BlockBlobURL defines a set of operations applicable to block blobs.
 | ||||||
|  | type BlockBlobURL struct { | ||||||
|  | 	BlobURL | ||||||
|  | 	bbClient blockBlobClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL { | ||||||
|  | 	blobClient := newBlobClient(url, p) | ||||||
|  | 	bbClient := newBlockBlobClient(url, p) | ||||||
|  | 	return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
 | ||||||
|  | func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL { | ||||||
|  | 	return NewBlockBlobURL(bb.blobClient.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
 | ||||||
|  | // Pass "" to remove the snapshot returning a URL to the base blob.
 | ||||||
|  | func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL { | ||||||
|  | 	p := NewBlobURLParts(bb.URL()) | ||||||
|  | 	p.Snapshot = snapshot | ||||||
|  | 	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Upload creates a new block blob or overwrites an existing block blob.
 | ||||||
|  | // Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
 | ||||||
|  | // supported with Upload; the content of the existing blob is overwritten with the new content. To
 | ||||||
|  | // perform a partial update of a block blob, use StageBlock and CommitBlockList.
 | ||||||
|  | // This method returns an error if the stream is not at position 0.
 | ||||||
|  | // Note that the http client closes the body stream after the request is sent to the service.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
 | ||||||
|  | func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	count, err := validateSeekableStreamAt0AndGetCount(body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return bb.bbClient.Upload(ctx, body, count, nil, | ||||||
|  | 		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, | ||||||
|  | 		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, | ||||||
|  | 		nil) | ||||||
|  | } | ||||||
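For payloads up to BlockBlobMaxUploadBlobBytes, a single Upload call is the simplest path; the body just has to be an io.ReadSeeker positioned at 0. A minimal sketch (the content type and metadata key are illustrative):

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// putSmallBlob uploads an in-memory payload as a block blob in one shot.
func putSmallBlob(ctx context.Context, bb azblob.BlockBlobURL, data []byte) {
	_, err := bb.Upload(ctx,
		bytes.NewReader(data), // a ReadSeeker at position 0, as Upload requires
		azblob.BlobHTTPHeaders{ContentType: "application/octet-stream"},
		azblob.Metadata{"origin": "example"},
		azblob.BlobAccessConditions{})
	if err != nil {
		log.Fatal(err)
	}
}
```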
|  | 
 | ||||||
|  | // StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
 | ||||||
|  | // Note that the http client closes the body stream after the request is sent to the service.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
 | ||||||
|  | func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) { | ||||||
|  | 	count, err := validateSeekableStreamAt0AndGetCount(body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
 | ||||||
|  | // If count is CountToEnd (0), then data is read from the specified offset to the end.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
 | ||||||
|  | func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) { | ||||||
|  | 	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() | ||||||
|  | 	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
 | ||||||
|  | // In order to be written as part of a blob, a block must have been successfully written
 | ||||||
|  | // to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
 | ||||||
|  | // by uploading only those blocks that have changed, then committing the new and existing
 | ||||||
|  | // blocks together. Any blocks not specified in the block list are permanently deleted.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
 | ||||||
|  | func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, | ||||||
|  | 	metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil, | ||||||
|  | 		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, | ||||||
|  | 		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
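The staged-block workflow splits a payload into blocks, uploads each with StageBlock under a unique, same-length base64 ID, and then makes the blob visible atomically with CommitBlockList. A minimal sequential sketch (the chunk size and ID helper are illustrative; production code would stage blocks in parallel):

```go
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// blockIDBase64 encodes a block index as a fixed-width base64 string;
// all IDs committed together must decode to the same byte length.
func blockIDBase64(i uint32) string {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, i)
	return base64.StdEncoding.EncodeToString(b)
}

// putLargeBlob stages 4MB chunks and commits them as one block blob.
func putLargeBlob(ctx context.Context, bb azblob.BlockBlobURL, data []byte) {
	const chunkSize = 4 * 1024 * 1024 // illustrative; must not exceed BlockBlobMaxStageBlockBytes
	var ids []string
	for i := 0; i*chunkSize < len(data); i++ {
		start, end := i*chunkSize, (i+1)*chunkSize
		if end > len(data) {
			end = len(data)
		}
		id := blockIDBase64(uint32(i))
		if _, err := bb.StageBlock(ctx, id, bytes.NewReader(data[start:end]),
			azblob.LeaseAccessConditions{}, nil); err != nil {
			log.Fatal(err)
		}
		ids = append(ids, id)
	}
	// Nothing is readable until the block list is committed.
	if _, err := bb.CommitBlockList(ctx, ids, azblob.BlobHTTPHeaders{},
		azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
}
```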
|  | 
 | ||||||
|  | // GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
 | ||||||
|  | func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) { | ||||||
|  | 	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | //////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | type BlockID [64]byte | ||||||
|  | 
 | ||||||
|  | func (blockID BlockID) ToBase64() string { | ||||||
|  | 	return base64.StdEncoding.EncodeToString(blockID[:]) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (blockID *BlockID) FromBase64(s string) error { | ||||||
|  | 	*blockID = BlockID{} // Zero out the block ID
 | ||||||
|  | 	_, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s)) | ||||||
|  | 	return err | ||||||
|  | } | ||||||
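BlockID is a fixed 64-byte buffer, so its base64 form automatically satisfies the service's rule that every block ID in a blob decode to the same length. A tiny round-trip sketch (the prefix string is arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Fill a BlockID with an application-chosen prefix; the rest stays zeroed.
	var id azblob.BlockID
	copy(id[:], "block-0000")

	s := id.ToBase64() // 64-byte IDs always encode to the same-length string
	fmt.Println("encoded:", s)

	var back azblob.BlockID
	if err := back.FromBase64(s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(back == id) // true
}
```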
|  | 
 | ||||||
|  | //////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | type uuidBlockID BlockID | ||||||
|  | 
 | ||||||
|  | func (ubi uuidBlockID) UUID() uuid { | ||||||
|  | 	u := uuid{} | ||||||
|  | 	copy(u[:], ubi[:len(u)]) | ||||||
|  | 	return u | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ubi uuidBlockID) Number() uint32 { | ||||||
|  | 	return binary.BigEndian.Uint32(ubi[len(uuid{}):]) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newUuidBlockID(u uuid) uuidBlockID { | ||||||
|  | 	ubi := uuidBlockID{}     // Create a new uuidBlockID
 | ||||||
|  | 	copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it
 | ||||||
|  | 	// Block number defaults to 0
 | ||||||
|  | 	return ubi | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID { | ||||||
|  | 	copy(ubi[:len(u)], u[:]) | ||||||
|  | 	return ubi | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { | ||||||
|  | 	binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID
 | ||||||
|  | 	return ubi                                                 // Return the passed-in copy
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ubi uuidBlockID) ToBase64() string { | ||||||
|  | 	return BlockID(ubi).ToBase64() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (ubi *uuidBlockID) FromBase64(s string) error { | ||||||
|  | 	return (*BlockID)(ubi).FromBase64(s) | ||||||
|  | } | ||||||
295 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go generated vendored Normal file
|  | @ -0,0 +1,295 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"net/url" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
 | ||||||
|  | type ContainerURL struct { | ||||||
|  | 	client containerClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL { | ||||||
|  | 	client := newContainerClient(url, p) | ||||||
|  | 	return ContainerURL{client: client} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // URL returns the URL endpoint used by the ContainerURL object.
 | ||||||
|  | func (c ContainerURL) URL() url.URL { | ||||||
|  | 	return c.client.URL() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns the URL as a string.
 | ||||||
|  | func (c ContainerURL) String() string { | ||||||
|  | 	u := c.URL() | ||||||
|  | 	return u.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
 | ||||||
|  | func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { | ||||||
|  | 	return NewContainerURL(c.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
 | ||||||
|  | // ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
 | ||||||
|  | // To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
 | ||||||
|  | // desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
 | ||||||
|  | // NewBlobURL method.
 | ||||||
|  | func (c ContainerURL) NewBlobURL(blobName string) BlobURL { | ||||||
|  | 	blobURL := appendToURLPath(c.URL(), blobName) | ||||||
|  | 	return NewBlobURL(blobURL, c.client.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
 | ||||||
|  | // ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
 | ||||||
|  | // To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
 | ||||||
|  | // desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
 | ||||||
|  | // NewAppendBlobURL method.
 | ||||||
|  | func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { | ||||||
|  | 	blobURL := appendToURLPath(c.URL(), blobName) | ||||||
|  | 	return NewAppendBlobURL(blobURL, c.client.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
 | ||||||
|  | // ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
 | ||||||
|  | // To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
 | ||||||
|  | // desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
 | ||||||
|  | // NewBlockBlobURL method.
 | ||||||
|  | func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { | ||||||
|  | 	blobURL := appendToURLPath(c.URL(), blobName) | ||||||
|  | 	return NewBlockBlobURL(blobURL, c.client.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
 | ||||||
|  | // ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
 | ||||||
|  | // To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
 | ||||||
|  | // desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
 | ||||||
|  | // NewPageBlobURL method.
 | ||||||
|  | func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { | ||||||
|  | 	blobURL := appendToURLPath(c.URL(), blobName) | ||||||
|  | 	return NewPageBlobURL(blobURL, c.client.Pipeline()) | ||||||
|  | } | ||||||
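These New*URL helpers are how the URL hierarchy is normally assembled: a ServiceURL yields a ContainerURL, which yields a typed blob URL, with the pipeline inherited at each step. A minimal sketch, assuming the v0.8.0 signatures vendored here (the account name, key, and blob names are placeholders):

```go
package main

import (
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func buildBlobURL() azblob.BlockBlobURL {
	// Placeholder account name and (base64) key.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlhY2NvdW50a2V5")
	if err != nil {
		log.Fatal(err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	service := azblob.NewServiceURL(*u, p)

	// Each step appends one path segment and reuses the same pipeline.
	container := service.NewContainerURL("mycontainer")
	return container.NewBlockBlobURL("mydir/myblob.bin")
}
```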
|  | 
 | ||||||
|  | // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
 | ||||||
|  | func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { | ||||||
|  | 	return c.client.Create(ctx, nil, metadata, publicAccessType, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
 | ||||||
|  | func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) { | ||||||
|  | 	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { | ||||||
|  | 		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetProperties returns the container's properties.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-properties.
 | ||||||
|  | func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) { | ||||||
|  | 	// NOTE: GetProperties returns the metadata AND the properties, so a separate GetMetadata call is unnecessary.
 | ||||||
|  | 	// This allows us to not expose a GetMetadata method at all, simplifying the API.
 | ||||||
|  | 	return c.client.GetProperties(ctx, nil, ac.pointers(), nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetMetadata sets the container's metadata.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
 | ||||||
|  | func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) { | ||||||
|  | 	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { | ||||||
|  | 		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service") | ||||||
|  | 	} | ||||||
|  | 	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
 | ||||||
|  | func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) { | ||||||
|  | 	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
 | ||||||
|  | type AccessPolicyPermission struct { | ||||||
|  | 	Read, Add, Create, Write, Delete, List bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String produces the access policy permission string for an Azure Storage container.
 | ||||||
|  | // Call this method to set AccessPolicy's Permission field.
 | ||||||
|  | func (p AccessPolicyPermission) String() string { | ||||||
|  | 	var b bytes.Buffer | ||||||
|  | 	if p.Read { | ||||||
|  | 		b.WriteRune('r') | ||||||
|  | 	} | ||||||
|  | 	if p.Add { | ||||||
|  | 		b.WriteRune('a') | ||||||
|  | 	} | ||||||
|  | 	if p.Create { | ||||||
|  | 		b.WriteRune('c') | ||||||
|  | 	} | ||||||
|  | 	if p.Write { | ||||||
|  | 		b.WriteRune('w') | ||||||
|  | 	} | ||||||
|  | 	if p.Delete { | ||||||
|  | 		b.WriteRune('d') | ||||||
|  | 	} | ||||||
|  | 	if p.List { | ||||||
|  | 		b.WriteRune('l') | ||||||
|  | 	} | ||||||
|  | 	return b.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the AccessPolicyPermission's fields from a string.
 | ||||||
|  | func (p *AccessPolicyPermission) Parse(s string) error { | ||||||
|  | 	*p = AccessPolicyPermission{} // Clear the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 'r': | ||||||
|  | 			p.Read = true | ||||||
|  | 		case 'a': | ||||||
|  | 			p.Add = true | ||||||
|  | 		case 'c': | ||||||
|  | 			p.Create = true | ||||||
|  | 		case 'w': | ||||||
|  | 			p.Write = true | ||||||
|  | 		case 'd': | ||||||
|  | 			p.Delete = true | ||||||
|  | 		case 'l': | ||||||
|  | 			p.List = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("invalid permission: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
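String and Parse are inverses over the service's single-letter permission flags, which makes the type convenient both for building a policy and for inspecting one. A small round-trip sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Build the permissions string for a read/list policy.
	perm := azblob.AccessPolicyPermission{Read: true, List: true}
	fmt.Println(perm.String()) // "rl"

	// Parse an existing permissions string back into flags.
	var parsed azblob.AccessPolicyPermission
	if err := parsed.Parse("racwdl"); err != nil { // any other letter is rejected
		log.Fatal(err)
	}
	fmt.Println(parsed.Write, parsed.Delete) // true true
}
```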
|  | 
 | ||||||
|  | // SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
 | ||||||
|  | func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier, | ||||||
|  | 	ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) { | ||||||
|  | 	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { | ||||||
|  | 		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") | ||||||
|  | 	} | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		accessType, ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 and 60 seconds, or infinite (-1).
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
 | ||||||
|  | func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() | ||||||
|  | 	return c.client.AcquireLease(ctx, nil, &duration, &proposedID, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RenewLease renews the container's previously-acquired lease.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
 | ||||||
|  | func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() | ||||||
|  | 	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReleaseLease releases the container's previously-acquired lease.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
 | ||||||
|  | func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() | ||||||
|  | 	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BreakLease breaks the container's previously-acquired lease (if it exists).
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
 | ||||||
|  | func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() | ||||||
|  | 	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ChangeLease changes the container's lease ID.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
 | ||||||
|  | func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() | ||||||
|  | 	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
 | ||||||
|  | // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
 | ||||||
|  | // After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
 | ||||||
|  | // previously-returned Marker) to get the next segment.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
 | ||||||
|  | func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) { | ||||||
|  | 	prefix, include, maxResults := o.pointers() | ||||||
|  | 	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil) | ||||||
|  | } | ||||||
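The Marker pattern means listing is a loop: start with a zero-value Marker, feed each response's NextMarker back in, and stop when the marker reports it is done. A minimal sketch over a flat listing (the page size is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// listAllBlobs walks every segment of a container's flat blob listing.
func listAllBlobs(ctx context.Context, container azblob.ContainerURL) {
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
			MaxResults: 1000, // the service may return fewer
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range resp.Segment.BlobItems {
			fmt.Println(item.Name)
		}
		marker = resp.NextMarker // zero-value once the listing is exhausted
	}
}
```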
|  | 
 | ||||||
|  | // ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
 | ||||||
|  | // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
 | ||||||
|  | // After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
 | ||||||
|  | // previously-returned Marker) to get the next segment.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
 | ||||||
|  | func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) { | ||||||
|  | 	if o.Details.Snapshots { | ||||||
|  | 		return nil, errors.New("snapshots are not supported in this listing operation") | ||||||
|  | 	} | ||||||
|  | 	prefix, include, maxResults := o.pointers() | ||||||
|  | 	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListBlobsSegmentOptions defines options available when calling ListBlobs.
 | ||||||
|  | type ListBlobsSegmentOptions struct { | ||||||
|  | 	Details BlobListingDetails // No Include query parameter is produced if ""
 | ||||||
|  | 	Prefix  string             // No Prefix query parameter is produced if ""
 | ||||||
|  | 
 | ||||||
|  | 	// MaxResults sets the maximum number of results you want the service to return. Note, the
 | ||||||
|  | 	// service may return fewer results than requested.
 | ||||||
|  | 	// MaxResults=0 means no 'MaxResults' header specified.
 | ||||||
|  | 	MaxResults int32 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) { | ||||||
|  | 	if o.Prefix != "" { | ||||||
|  | 		prefix = &o.Prefix | ||||||
|  | 	} | ||||||
|  | 	include = o.Details.slice() | ||||||
|  | 	if o.MaxResults != 0 { | ||||||
|  | 		maxResults = &o.MaxResults | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobListingDetails indicates what additional information the service should return with each blob.
 | ||||||
|  | type BlobListingDetails struct { | ||||||
|  | 	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // slice produces the Include query parameter's values.
 | ||||||
|  | func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType { | ||||||
|  | 	items := []ListBlobsIncludeItemType{} | ||||||
|  | 	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
 | ||||||
|  | 	if d.Copy { | ||||||
|  | 		items = append(items, ListBlobsIncludeItemCopy) | ||||||
|  | 	} | ||||||
|  | 	if d.Deleted { | ||||||
|  | 		items = append(items, ListBlobsIncludeItemDeleted) | ||||||
|  | 	} | ||||||
|  | 	if d.Metadata { | ||||||
|  | 		items = append(items, ListBlobsIncludeItemMetadata) | ||||||
|  | 	} | ||||||
|  | 	if d.Snapshots { | ||||||
|  | 		items = append(items, ListBlobsIncludeItemSnapshots) | ||||||
|  | 	} | ||||||
|  | 	if d.UncommittedBlobs { | ||||||
|  | 		items = append(items, ListBlobsIncludeItemUncommittedblobs) | ||||||
|  | 	} | ||||||
|  | 	return items | ||||||
|  | } | ||||||
223 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go generated vendored Normal file
|  | @ -0,0 +1,223 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// PageBlobPageBytes indicates the number of bytes in a page (512).
 | ||||||
|  | 	PageBlobPageBytes = 512 | ||||||
|  | 
 | ||||||
|  | 	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to UploadPages.
 | ||||||
|  | 	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // PageBlobURL defines a set of operations applicable to page blobs.
 | ||||||
|  | type PageBlobURL struct { | ||||||
|  | 	BlobURL | ||||||
|  | 	pbClient pageBlobClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL { | ||||||
|  | 	blobClient := newBlobClient(url, p) | ||||||
|  | 	pbClient := newPageBlobClient(url, p) | ||||||
|  | 	return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
 | ||||||
|  | func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL { | ||||||
|  | 	return NewPageBlobURL(pb.blobClient.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
 | ||||||
|  | // Pass "" to remove the snapshot returning a URL to the base blob.
 | ||||||
|  | func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL { | ||||||
|  | 	p := NewBlobURLParts(pb.URL()) | ||||||
|  | 	p.Snapshot = snapshot | ||||||
|  | 	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Create creates a page blob of the specified length. Call UploadPages to upload data to a page blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
 | ||||||
|  | func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.Create(ctx, 0, size, nil, | ||||||
|  | 		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl, | ||||||
|  | 		metadata, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
 | ||||||
|  | // This method returns an error if the stream is not at position 0.
 | ||||||
|  | // Note that the http client closes the body stream after the request is sent to the service.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
 | ||||||
|  | func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) { | ||||||
|  | 	count, err := validateSeekableStreamAt0AndGetCount(body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, | ||||||
|  | 		PageRange{Start: offset, End: offset + count - 1}.pointers(), | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
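Because both the offset and the payload length must be 512-byte aligned, callers typically pad their buffers to a multiple of PageBlobPageBytes before calling UploadPages. A minimal sketch that creates a small page blob and writes its first pages:

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// writeAlignedPages pads data to a 512-byte boundary and writes it at offset 0.
func writeAlignedPages(ctx context.Context, pb azblob.PageBlobURL, data []byte) {
	// Round the payload up to a whole number of pages.
	size := int64((len(data) + azblob.PageBlobPageBytes - 1) /
		azblob.PageBlobPageBytes * azblob.PageBlobPageBytes)
	padded := make([]byte, size)
	copy(padded, data)

	if _, err := pb.Create(ctx, size, 0, azblob.BlobHTTPHeaders{},
		azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
	if _, err := pb.UploadPages(ctx, 0, bytes.NewReader(padded),
		azblob.PageBlobAccessConditions{}, nil); err != nil {
		log.Fatal(err)
	}
}
```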
|  | 
 | ||||||
|  | // UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
 | ||||||
|  | // The sourceOffset specifies the start offset of source data to copy from.
 | ||||||
|  | // The destOffset specifies the start offset in the destination page blob that the data will be written to.
 | ||||||
|  | // The count must be a multiple of 512 bytes.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
 | ||||||
|  | func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() | ||||||
|  | 	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() | ||||||
|  | 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0, | ||||||
|  | 		*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ClearPages frees the specified pages from the page blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
 | ||||||
|  | func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.ClearPages(ctx, 0, nil, | ||||||
|  | 		PageRange{Start: offset, End: offset + count - 1}.pointers(), | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, | ||||||
|  | 		ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
 | ||||||
|  | func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.GetPageRanges(ctx, nil, nil, | ||||||
|  | 		httpRange{offset: offset, count: count}.pointers(), | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
 | ||||||
|  | func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, | ||||||
|  | 		httpRange{offset: offset, count: count}.pointers(), | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, | ||||||
|  | 		nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Resize resizes the page blob to the specified size (which must be a multiple of 512).
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
 | ||||||
|  | func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(), | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UpdateSequenceNumber sets the page blob's sequence number.
 | ||||||
|  | func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, | ||||||
|  | 	ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) { | ||||||
|  | 	sn := &sequenceNumber | ||||||
|  | 	if action == SequenceNumberActionIncrement { | ||||||
|  | 		sn = nil | ||||||
|  | 	} | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	return pb.pbClient.UpdateSequenceNumber(ctx, action, nil, | ||||||
|  | 		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, | ||||||
|  | 		sn, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
 | ||||||
|  | // The snapshot is copied such that only the differential changes between the previously copied snapshot and the specified snapshot are transferred to the destination.
 | ||||||
|  | // The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
 | ||||||
|  | // https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
 | ||||||
|  | func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) { | ||||||
|  | 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() | ||||||
|  | 	qp := source.Query() | ||||||
|  | 	qp.Set("snapshot", snapshot) | ||||||
|  | 	source.RawQuery = qp.Encode() | ||||||
|  | 	return pb.pbClient.CopyIncremental(ctx, source.String(), nil, | ||||||
|  | 		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (pr PageRange) pointers() *string { | ||||||
|  | 	endOffset := strconv.FormatInt(pr.End, 10) | ||||||
|  | 	asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset) | ||||||
|  | 	return &asString | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type PageBlobAccessConditions struct { | ||||||
|  | 	ModifiedAccessConditions | ||||||
|  | 	LeaseAccessConditions | ||||||
|  | 	SequenceNumberAccessConditions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
 | ||||||
|  | type SequenceNumberAccessConditions struct { | ||||||
|  | 	// IfSequenceNumberLessThan ensures that the page blob operation succeeds
 | ||||||
|  | 	// only if the blob's sequence number is less than a value.
 | ||||||
|  | 	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
 | ||||||
|  | 	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
 | ||||||
|  | 	// IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
 | ||||||
|  | 	IfSequenceNumberLessThan int64 | ||||||
|  | 
 | ||||||
|  | 	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
 | ||||||
|  | 	// only if the blob's sequence number is less than or equal to a value.
 | ||||||
|  | 	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
 | ||||||
|  | 	// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
 | ||||||
|  | 	// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
 | ||||||
|  | 	IfSequenceNumberLessThanOrEqual int64 | ||||||
|  | 
 | ||||||
|  | 	// IfSequenceNumberEqual ensures that the page blob operation succeeds
 | ||||||
|  | 	// only if the blob's sequence number is equal to a value.
 | ||||||
|  | 	// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
 | ||||||
|  | 	// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
 | ||||||
|  | 	// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
 | ||||||
|  | 	IfSequenceNumberEqual int64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // pointers is for internal infrastructure. It returns the fields as pointers.
 | ||||||
|  | func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { | ||||||
|  | 	var zero int64 // Defaults to 0
 | ||||||
|  | 	switch ac.IfSequenceNumberLessThan { | ||||||
|  | 	case -1: | ||||||
|  | 		snlt = &zero | ||||||
|  | 	case 0: | ||||||
|  | 		snlt = nil | ||||||
|  | 	default: | ||||||
|  | 		snlt = &ac.IfSequenceNumberLessThan | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch ac.IfSequenceNumberLessThanOrEqual { | ||||||
|  | 	case -1: | ||||||
|  | 		snltoe = &zero | ||||||
|  | 	case 0: | ||||||
|  | 		snltoe = nil | ||||||
|  | 	default: | ||||||
|  | 		snltoe = &ac.IfSequenceNumberLessThanOrEqual | ||||||
|  | 	} | ||||||
|  | 	switch ac.IfSequenceNumberEqual { | ||||||
|  | 	case -1: | ||||||
|  | 		sne = &zero | ||||||
|  | 	case 0: | ||||||
|  | 		sne = nil | ||||||
|  | 	default: | ||||||
|  | 		sne = &ac.IfSequenceNumberEqual | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
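The -1/0/>0 encoding above exists because 0 is both the Go zero value and a legal sequence number, so -1 is the sentinel meaning "send the header with an explicit value of 0". A short sketch of the common case (the limit and page buffer are illustrative):

```go
package main

import (
	"bytes"
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// guardedWrite only succeeds while the blob's sequence number is below limit.
func guardedWrite(ctx context.Context, pb azblob.PageBlobURL, page []byte, limit int64) error {
	ac := azblob.PageBlobAccessConditions{
		SequenceNumberAccessConditions: azblob.SequenceNumberAccessConditions{
			IfSequenceNumberLessThan: limit, // >0: header sent with this value
		},
	}
	_, err := pb.UploadPages(ctx, 0, bytes.NewReader(page), ac, nil)
	return err
}

// To condition on a sequence number of exactly 0, pass the -1 sentinel instead,
// e.g. SequenceNumberAccessConditions{IfSequenceNumberEqual: -1}.
```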
145 vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go generated vendored Normal file
|  | @ -0,0 +1,145 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strings" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
 | ||||||
|  | 	ContainerNameRoot = "$root" | ||||||
|  | 
 | ||||||
|  | 	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
 | ||||||
|  | 	ContainerNameLogs = "$logs" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
 | ||||||
|  | type ServiceURL struct { | ||||||
|  | 	client serviceClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
 | ||||||
|  | func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { | ||||||
|  | 	client := newServiceClient(primaryURL, p) | ||||||
|  | 	return ServiceURL{client: client} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
 | ||||||
|  | // OAuth is required for this call, as well as any role that can delegate access to the storage account.
 | ||||||
|  | func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) { | ||||||
|  | 	sc := newServiceClient(s.client.url, s.client.p) | ||||||
|  | 	udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return UserDelegationCredential{}, err | ||||||
|  | 	} | ||||||
|  | 	return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // URL returns the URL endpoint used by the ServiceURL object.
 | ||||||
|  | func (s ServiceURL) URL() url.URL { | ||||||
|  | 	return s.client.URL() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns the URL as a string.
 | ||||||
|  | func (s ServiceURL) String() string { | ||||||
|  | 	u := s.URL() | ||||||
|  | 	return u.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
 | ||||||
|  | func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { | ||||||
|  | 	return NewServiceURL(s.URL(), p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
 | ||||||
|  | // ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
 | ||||||
|  | // To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
 | ||||||
|  | // desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
 | ||||||
|  | // NewContainerURL method.
 | ||||||
|  | func (s ServiceURL) NewContainerURL(containerName string) ContainerURL { | ||||||
|  | 	containerURL := appendToURLPath(s.URL(), containerName) | ||||||
|  | 	return NewContainerURL(containerURL, s.client.Pipeline()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
 | ||||||
|  | func appendToURLPath(u url.URL, name string) url.URL { | ||||||
|  | 	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
 | ||||||
|  | 	// When you call url.Parse() this is what you'll get:
 | ||||||
|  | 	//     Scheme: "https"
 | ||||||
|  | 	//     Opaque: ""
 | ||||||
|  | 	//       User: nil
 | ||||||
|  | 	//       Host: "ms.com"
 | ||||||
|  | 	//       Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
 | ||||||
|  | 	//    RawPath: ""
 | ||||||
|  | 	// ForceQuery: false
 | ||||||
|  | 	//   RawQuery: "k1=v1&k2=v2"
 | ||||||
|  | 	//   Fragment: "f"
 | ||||||
|  | 	if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' { | ||||||
|  | 		u.Path += "/" // Append "/" to end before appending name
 | ||||||
|  | 	} | ||||||
|  | 	u.Path += name | ||||||
|  | 	return u | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty | ||||||
|  | // Marker to start enumeration from the beginning. Container names are returned in lexicographic order. | ||||||
|  | // After getting a segment, process it, and then call ListContainersSegment again (passing the | ||||||
|  | // previously-returned Marker) to get the next segment. For more information, see | ||||||
|  | // https://docs.microsoft.com/rest/api/storageservices/list-containers2. | ||||||
|  | func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) { | ||||||
|  | 	prefix, include, maxResults := o.pointers() | ||||||
|  | 	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListContainersSegmentOptions defines options available when calling ListContainersSegment. | ||||||
|  | type ListContainersSegmentOptions struct { | ||||||
|  | 	Detail     ListContainersDetail // No IncludeType header is produced if ""
 | ||||||
|  | 	Prefix     string               // No Prefix header is produced if ""
 | ||||||
|  | 	MaxResults int32                // 0 means unspecified
 | ||||||
|  | 	// TODO: update swagger to generate this type?
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) { | ||||||
|  | 	if o.Prefix != "" { | ||||||
|  | 		prefix = &o.Prefix | ||||||
|  | 	} | ||||||
|  | 	if o.MaxResults != 0 { | ||||||
|  | 		maxResults = &o.MaxResults | ||||||
|  | 	} | ||||||
|  | 	include = ListContainersIncludeType(o.Detail.string()) | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListContainersDetail indicates what additional information the service should return with each container. | ||||||
|  | type ListContainersDetail struct { | ||||||
|  | 	// Tells the service whether to return metadata for each container.
 | ||||||
|  | 	Metadata bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // string produces the Include query parameter's value.
 | ||||||
|  | func (d *ListContainersDetail) string() string { | ||||||
|  | 	items := make([]string, 0, 1) | ||||||
|  | 	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
 | ||||||
|  | 	if d.Metadata { | ||||||
|  | 		items = append(items, string(ListContainersIncludeMetadata)) | ||||||
|  | 	} | ||||||
|  | 	if len(items) > 0 { | ||||||
|  | 		return strings.Join(items, ",") | ||||||
|  | 	} | ||||||
|  | 	return string(ListContainersIncludeNone) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetProperties returns the properties of the Blob service, such as its logging, metrics, and CORS configuration. | ||||||
|  | func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) { | ||||||
|  | 	return bsu.client.GetProperties(ctx, nil, nil) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // SetProperties sets the properties of the Blob service. | ||||||
|  | func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) { | ||||||
|  | 	return bsu.client.SetProperties(ctx, properties, nil, nil) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // GetStatistics retrieves replication statistics for the Blob service. | ||||||
|  | func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { | ||||||
|  | 	return bsu.client.GetStatistics(ctx, nil, nil) | ||||||
|  | } | ||||||
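Aside (not part of this diff): a minimal sketch of driving the ServiceURL API above against the vendored v0.8 azblob package. The account name, key, and URL are placeholders; the loop follows the Marker pattern described in the ListContainersSegment comment.

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder credentials; a real base64-encoded account key is required.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "<base64-account-key>")
	if err != nil {
		panic(err)
	}
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	serviceURL := azblob.NewServiceURL(*u, p)

	// Enumerate all containers one segment at a time, feeding each
	// response's NextMarker back into the next call.
	for marker := (azblob.Marker{}); marker.NotDone(); {
		resp, err := serviceURL.ListContainersSegment(context.Background(), marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			panic(err)
		}
		for _, c := range resp.ContainerItems {
			fmt.Println(c.Name)
		}
		marker = resp.NextMarker
	}
}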
							
								
								
									
38	vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,38 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"crypto/hmac" | ||||||
|  | 	"crypto/sha256" | ||||||
|  | 	"encoding/base64" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
 | ||||||
|  | func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential { | ||||||
|  | 	return UserDelegationCredential{ | ||||||
|  | 		accountName: accountName, | ||||||
|  | 		accountKey:  key, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type UserDelegationCredential struct { | ||||||
|  | 	accountName string | ||||||
|  | 	accountKey  UserDelegationKey | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountName returns the Storage account's name
 | ||||||
|  | func (f UserDelegationCredential) AccountName() string { | ||||||
|  | 	return f.accountName | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. | ||||||
|  | func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) { | ||||||
|  | 	bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value) | ||||||
|  | 	h := hmac.New(sha256.New, bytes) | ||||||
|  | 	h.Write([]byte(message)) | ||||||
|  | 	return base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Private method to return important parameters for NewSASQueryParameters
 | ||||||
|  | func (f UserDelegationCredential) getUDKParams() *UserDelegationKey { | ||||||
|  | 	return &f.accountKey | ||||||
|  | } | ||||||
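A standalone illustration of the signing step above: the user delegation key's Value is base64-decoded, the message is MACed with HMAC-SHA256, and the digest is base64-encoded again. The key below is a made-up value, not a real storage key.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Made-up base64 key standing in for UserDelegationKey.Value.
	key, err := base64.StdEncoding.DecodeString("bm90IGEgcmVhbCBzdG9yYWdlIGtleQ==")
	if err != nil {
		panic(err)
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte("string-to-sign"))
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}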
|  | @ -0,0 +1,3 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | const serviceLibVersion = "0.7" | ||||||
							
								
								
									
55	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,55 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Credential represents any credential type; it is used to create a credential policy Factory. | ||||||
|  | type Credential interface { | ||||||
|  | 	pipeline.Factory | ||||||
|  | 	credentialMarker() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type credentialFunc pipeline.FactoryFunc | ||||||
|  | 
 | ||||||
|  | func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return f(next, po) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 | ||||||
|  | func (credentialFunc) credentialMarker() {} | ||||||
|  | 
 | ||||||
|  | //////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | // NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources | ||||||
|  | // or for use with Shared Access Signatures (SAS). | ||||||
|  | func NewAnonymousCredential() Credential { | ||||||
|  | 	return anonymousCredentialFactory | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
 | ||||||
|  | 
 | ||||||
|  | // anonymousCredentialPolicyFactory is the credential's policy factory.
 | ||||||
|  | type anonymousCredentialPolicyFactory struct { | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New creates a credential policy object.
 | ||||||
|  | func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return &anonymousCredentialPolicy{next: next} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 | ||||||
|  | func (*anonymousCredentialPolicyFactory) credentialMarker() {} | ||||||
|  | 
 | ||||||
|  | // anonymousCredentialPolicy is the credential's policy object.
 | ||||||
|  | type anonymousCredentialPolicy struct { | ||||||
|  | 	next pipeline.Policy | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Do implements the credential's policy interface.
 | ||||||
|  | func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 	// For anonymous credentials, this is effectively a no-op
 | ||||||
|  | 	return p.next.Do(ctx, request) | ||||||
|  | } | ||||||
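A sketch (with a hypothetical public blob URL) of where NewAnonymousCredential fits: the pipeline issues unauthenticated requests, which only succeed against publicly readable resources or SAS URLs.

package main

import (
	"context"
	"io"
	"net/url"
	"os"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Hypothetical URL of a blob in a container with public read access.
	u, _ := url.Parse("https://myaccount.blob.core.windows.net/public-container/hello.txt")
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	blobURL := azblob.NewBlobURL(*u, p)

	// Download the whole blob; the anonymous policy adds no Authorization header.
	resp, err := blobURL.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		panic(err)
	}
	body := resp.Body(azblob.RetryReaderOptions{})
	defer body.Close()
	io.Copy(os.Stdout, body)
}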
							
								
								
									
205	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,205 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"crypto/hmac" | ||||||
|  | 	"crypto/sha256" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"errors" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"sort" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
 | ||||||
|  | // storage account's name and either its primary or secondary key.
 | ||||||
|  | func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { | ||||||
|  | 	bytes, err := base64.StdEncoding.DecodeString(accountKey) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return &SharedKeyCredential{}, err | ||||||
|  | 	} | ||||||
|  | 	return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SharedKeyCredential contains an account's name and its primary or secondary key. | ||||||
|  | // It is immutable, making it shareable and goroutine-safe. | ||||||
|  | type SharedKeyCredential struct { | ||||||
|  | 	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
 | ||||||
|  | 	accountName string | ||||||
|  | 	accountKey  []byte | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AccountName returns the Storage account's name.
 | ||||||
|  | func (f SharedKeyCredential) AccountName() string { | ||||||
|  | 	return f.accountName | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (f SharedKeyCredential) getAccountKey() []byte { | ||||||
|  | 	return f.accountKey | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getUDKParams is a no-op that exists to satisfy the StorageAccountCredential interface. | ||||||
|  | func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New creates a credential policy object.
 | ||||||
|  | func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 		// Add an x-ms-date header if it doesn't already exist | ||||||
|  | 		if d := request.Header.Get(headerXmsDate); d == "" { | ||||||
|  | 			request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} | ||||||
|  | 		} | ||||||
|  | 		stringToSign, err := f.buildStringToSign(request) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 		signature := f.ComputeHMACSHA256(stringToSign) | ||||||
|  | 		authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") | ||||||
|  | 		request.Header[headerAuthorization] = []string{authHeader} | ||||||
|  | 
 | ||||||
|  | 		response, err := next.Do(ctx, request) | ||||||
|  | 		if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { | ||||||
|  | 			// Service failed to authenticate request, log it
 | ||||||
|  | 			po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") | ||||||
|  | 		} | ||||||
|  | 		return response, err | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 | ||||||
|  | func (*SharedKeyCredential) credentialMarker() {} | ||||||
|  | 
 | ||||||
|  | // Constants ensuring that header names are correctly spelled and consistently cased.
 | ||||||
|  | const ( | ||||||
|  | 	headerAuthorization      = "Authorization" | ||||||
|  | 	headerCacheControl       = "Cache-Control" | ||||||
|  | 	headerContentEncoding    = "Content-Encoding" | ||||||
|  | 	headerContentDisposition = "Content-Disposition" | ||||||
|  | 	headerContentLanguage    = "Content-Language" | ||||||
|  | 	headerContentLength      = "Content-Length" | ||||||
|  | 	headerContentMD5         = "Content-MD5" | ||||||
|  | 	headerContentType        = "Content-Type" | ||||||
|  | 	headerDate               = "Date" | ||||||
|  | 	headerIfMatch            = "If-Match" | ||||||
|  | 	headerIfModifiedSince    = "If-Modified-Since" | ||||||
|  | 	headerIfNoneMatch        = "If-None-Match" | ||||||
|  | 	headerIfUnmodifiedSince  = "If-Unmodified-Since" | ||||||
|  | 	headerRange              = "Range" | ||||||
|  | 	headerUserAgent          = "User-Agent" | ||||||
|  | 	headerXmsDate            = "x-ms-date" | ||||||
|  | 	headerXmsVersion         = "x-ms-version" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
 | ||||||
|  | func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { | ||||||
|  | 	h := hmac.New(sha256.New, f.accountKey) | ||||||
|  | 	h.Write([]byte(message)) | ||||||
|  | 	return base64.StdEncoding.EncodeToString(h.Sum(nil)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { | ||||||
|  | 	// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
 | ||||||
|  | 	headers := request.Header | ||||||
|  | 	contentLength := headers.Get(headerContentLength) | ||||||
|  | 	if contentLength == "0" { | ||||||
|  | 		contentLength = "" | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	stringToSign := strings.Join([]string{ | ||||||
|  | 		request.Method, | ||||||
|  | 		headers.Get(headerContentEncoding), | ||||||
|  | 		headers.Get(headerContentLanguage), | ||||||
|  | 		contentLength, | ||||||
|  | 		headers.Get(headerContentMD5), | ||||||
|  | 		headers.Get(headerContentType), | ||||||
|  | 		"", // Empty date because x-ms-date is expected (as per web page above)
 | ||||||
|  | 		headers.Get(headerIfModifiedSince), | ||||||
|  | 		headers.Get(headerIfMatch), | ||||||
|  | 		headers.Get(headerIfNoneMatch), | ||||||
|  | 		headers.Get(headerIfUnmodifiedSince), | ||||||
|  | 		headers.Get(headerRange), | ||||||
|  | 		buildCanonicalizedHeader(headers), | ||||||
|  | 		canonicalizedResource, | ||||||
|  | 	}, "\n") | ||||||
|  | 	return stringToSign, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func buildCanonicalizedHeader(headers http.Header) string { | ||||||
|  | 	cm := map[string][]string{} | ||||||
|  | 	for k, v := range headers { | ||||||
|  | 		headerName := strings.TrimSpace(strings.ToLower(k)) | ||||||
|  | 		if strings.HasPrefix(headerName, "x-ms-") { | ||||||
|  | 			cm[headerName] = v // NOTE: the value must not have any whitespace around it.
 | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if len(cm) == 0 { | ||||||
|  | 		return "" | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	keys := make([]string, 0, len(cm)) | ||||||
|  | 	for key := range cm { | ||||||
|  | 		keys = append(keys, key) | ||||||
|  | 	} | ||||||
|  | 	sort.Strings(keys) | ||||||
|  | 	ch := bytes.NewBufferString("") | ||||||
|  | 	for i, key := range keys { | ||||||
|  | 		if i > 0 { | ||||||
|  | 			ch.WriteRune('\n') | ||||||
|  | 		} | ||||||
|  | 		ch.WriteString(key) | ||||||
|  | 		ch.WriteRune(':') | ||||||
|  | 		ch.WriteString(strings.Join(cm[key], ",")) | ||||||
|  | 	} | ||||||
|  | 	return string(ch.Bytes()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { | ||||||
|  | 	// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
 | ||||||
|  | 	cr := bytes.NewBufferString("/") | ||||||
|  | 	cr.WriteString(f.accountName) | ||||||
|  | 
 | ||||||
|  | 	if len(u.Path) > 0 { | ||||||
|  | 		// Any portion of the CanonicalizedResource string that is derived from
 | ||||||
|  | 		// the resource's URI should be encoded exactly as it is in the URI.
 | ||||||
|  | 		// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
 | ||||||
|  | 		cr.WriteString(u.EscapedPath()) | ||||||
|  | 	} else { | ||||||
|  | 		// a slash is required to indicate the root path
 | ||||||
|  | 		cr.WriteString("/") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// params is a map[string][]string; param name is key; params values is []string
 | ||||||
|  | 	params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
 | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(params) > 0 { // There is at least 1 query parameter
 | ||||||
|  | 		paramNames := []string{} // We use this to sort the parameter key names
 | ||||||
|  | 		for paramName := range params { | ||||||
|  | 			paramNames = append(paramNames, paramName) // paramNames must be lowercase
 | ||||||
|  | 		} | ||||||
|  | 		sort.Strings(paramNames) | ||||||
|  | 
 | ||||||
|  | 		for _, paramName := range paramNames { | ||||||
|  | 			paramValues := params[paramName] | ||||||
|  | 			sort.Strings(paramValues) | ||||||
|  | 
 | ||||||
|  | 			// Join the sorted key values separated by ','
 | ||||||
|  | 			// Then prepend "keyName:"; then add this string to the buffer
 | ||||||
|  | 			cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return string(cr.Bytes()), nil | ||||||
|  | } | ||||||
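To make the canonicalization rules above concrete, here is a standalone sketch that mirrors buildCanonicalizedHeader: only x-ms-* headers participate, lower-cased, sorted by name, and joined as "name:value" lines before signing. The header values are examples only.

package main

import (
	"fmt"
	"net/http"
	"sort"
	"strings"
)

func canonicalize(h http.Header) string {
	names := []string{}
	for k := range h {
		if n := strings.ToLower(strings.TrimSpace(k)); strings.HasPrefix(n, "x-ms-") {
			names = append(names, n)
		}
	}
	sort.Strings(names)
	lines := make([]string, 0, len(names))
	for _, n := range names {
		lines = append(lines, n+":"+strings.Join(h[http.CanonicalHeaderKey(n)], ","))
	}
	return strings.Join(lines, "\n")
}

func main() {
	h := http.Header{}
	h.Set("x-ms-version", "2018-11-09")
	h.Set("x-ms-date", "Mon, 02 Jan 2006 15:04:05 GMT")
	h.Set("Content-Type", "text/plain") // ignored: not an x-ms-* header
	fmt.Println(canonicalize(h))
	// Output:
	// x-ms-date:Mon, 02 Jan 2006 15:04:05 GMT
	// x-ms-version:2018-11-09
}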
							
								
								
									
137	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,137 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"errors" | ||||||
|  | 	"sync/atomic" | ||||||
|  | 
 | ||||||
|  | 	"runtime" | ||||||
|  | 	"sync" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // TokenRefresher represents a callback method that you write; this method is called periodically
 | ||||||
|  | // so you can refresh the token credential's value.
 | ||||||
|  | type TokenRefresher func(credential TokenCredential) time.Duration | ||||||
|  | 
 | ||||||
|  | // TokenCredential represents a token credential (which is also a pipeline.Factory).
 | ||||||
|  | type TokenCredential interface { | ||||||
|  | 	Credential | ||||||
|  | 	Token() string | ||||||
|  | 	SetToken(newToken string) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
 | ||||||
|  | // resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
 | ||||||
|  | // tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
 | ||||||
|  | // TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
 | ||||||
|  | // indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
 | ||||||
|  | // If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
 | ||||||
|  | // TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a | ||||||
|  | // token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
 | ||||||
|  | func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential { | ||||||
|  | 	tc := &tokenCredential{} | ||||||
|  | 	tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
 | ||||||
|  | 	if tokenRefresher == nil { | ||||||
|  | 		return tc // If no callback specified, return the simple tokenCredential
 | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	tcwr := &tokenCredentialWithRefresh{token: tc} | ||||||
|  | 	tcwr.token.startRefresh(tokenRefresher) | ||||||
|  | 	runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) { | ||||||
|  | 		deadTC.token.stopRefresh() | ||||||
|  | 		deadTC.token = nil //  Sanity (not really required)
 | ||||||
|  | 	}) | ||||||
|  | 	return tcwr | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // tokenCredentialWithRefresh is a wrapper over a token credential.
 | ||||||
|  | // When this wrapper object gets GC'd, it stops the tokenCredential's timer
 | ||||||
|  | // which allows the tokenCredential object to also be GC'd.
 | ||||||
|  | type tokenCredentialWithRefresh struct { | ||||||
|  | 	token *tokenCredential | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 | ||||||
|  | func (*tokenCredentialWithRefresh) credentialMarker() {} | ||||||
|  | 
 | ||||||
|  | // Token returns the current token value
 | ||||||
|  | func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() } | ||||||
|  | 
 | ||||||
|  | // SetToken changes the current token value
 | ||||||
|  | func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) } | ||||||
|  | 
 | ||||||
|  | // New satisfies pipeline.Factory's New method creating a pipeline policy object.
 | ||||||
|  | func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return f.token.New(next, po) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | // tokenCredential is a pipeline.Factory; it is the credential's policy factory. | ||||||
|  | type tokenCredential struct { | ||||||
|  | 	token atomic.Value | ||||||
|  | 
 | ||||||
|  | 	// The members below are only used if the user specified a tokenRefresher callback function.
 | ||||||
|  | 	timer          *time.Timer | ||||||
|  | 	tokenRefresher TokenRefresher | ||||||
|  | 	lock           sync.Mutex | ||||||
|  | 	stopped        bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 | ||||||
|  | func (*tokenCredential) credentialMarker() {} | ||||||
|  | 
 | ||||||
|  | // Token returns the current token value
 | ||||||
|  | func (f *tokenCredential) Token() string { return f.token.Load().(string) } | ||||||
|  | 
 | ||||||
|  | // SetToken changes the current token value
 | ||||||
|  | func (f *tokenCredential) SetToken(token string) { f.token.Store(token) } | ||||||
|  | 
 | ||||||
|  | // startRefresh calls refresh which immediately calls tokenRefresher
 | ||||||
|  | // and then starts a timer to call tokenRefresher in the future.
 | ||||||
|  | func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) { | ||||||
|  | 	f.tokenRefresher = tokenRefresher | ||||||
|  | 	f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
 | ||||||
|  | 	f.refresh() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // refresh calls the user's tokenRefresher so they can refresh the token (by
 | ||||||
|  | // calling SetToken) and then starts another timer (based on the returned duration) | ||||||
|  | // in order to refresh the token again in the future.
 | ||||||
|  | func (f *tokenCredential) refresh() { | ||||||
|  | 	d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
 | ||||||
|  | 	if d > 0 {               // If duration is 0 or negative, refresher wants to not be called again
 | ||||||
|  | 		f.lock.Lock() | ||||||
|  | 		if !f.stopped { | ||||||
|  | 			f.timer = time.AfterFunc(d, f.refresh) | ||||||
|  | 		} | ||||||
|  | 		f.lock.Unlock() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // stopRefresh stops any pending timer and sets stopped field to true to prevent
 | ||||||
|  | // any new timer from starting.
 | ||||||
|  | // NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
 | ||||||
|  | func (f *tokenCredential) stopRefresh() { | ||||||
|  | 	f.lock.Lock() | ||||||
|  | 	f.stopped = true | ||||||
|  | 	if f.timer != nil { | ||||||
|  | 		f.timer.Stop() | ||||||
|  | 	} | ||||||
|  | 	f.lock.Unlock() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New satisfies pipeline.Factory's New method creating a pipeline policy object.
 | ||||||
|  | func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 		if request.URL.Scheme != "https" { | ||||||
|  | 			// HTTPS must be used, otherwise the tokens are at the risk of being exposed
 | ||||||
|  | 			return nil, errors.New("token credentials require a URL using the https protocol scheme") | ||||||
|  | 		} | ||||||
|  | 		request.Header[headerAuthorization] = []string{"Bearer " + f.Token()} | ||||||
|  | 		return next.Do(ctx, request) | ||||||
|  | 	}) | ||||||
|  | } | ||||||
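A sketch of wiring a refresher into NewTokenCredential above. fetchToken is a hypothetical stand-in for whatever obtains a fresh OAuth token (for example, an Azure AD request); the refresh margin is illustrative.

package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// fetchToken is a hypothetical helper; a real implementation would call Azure AD.
func fetchToken() (token string, expiresIn time.Duration, err error) {
	return "<jwt>", time.Hour, nil
}

func main() {
	refresher := func(credential azblob.TokenCredential) time.Duration {
		token, expiresIn, err := fetchToken()
		if err != nil {
			return 0 // a zero duration stops the refresh timer, per the doc comment above
		}
		credential.SetToken(token)
		return expiresIn - 2*time.Minute // refresh slightly before expiry
	}
	credential := azblob.NewTokenCredential("<initial token>", refresher)
	_ = azblob.NewPipeline(credential, azblob.PipelineOptions{})
}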
							
								
								
									
27	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,27 @@ | ||||||
|  | // +build linux darwin freebsd openbsd netbsd dragonfly
 | ||||||
|  | 
 | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"os" | ||||||
|  | 	"syscall" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type mmf []byte | ||||||
|  | 
 | ||||||
|  | func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { | ||||||
|  | 	prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
 | ||||||
|  | 	if writable { | ||||||
|  | 		prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED | ||||||
|  | 	} | ||||||
|  | 	addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags) | ||||||
|  | 	return mmf(addr), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *mmf) unmap() { | ||||||
|  | 	err := syscall.Munmap(*m) | ||||||
|  | 	*m = nil | ||||||
|  | 	if err != nil { | ||||||
|  | 		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") | ||||||
|  | 	} | ||||||
|  | } | ||||||
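For reference, a standalone sketch of the same syscall.Mmap pattern newMMF uses above: map a readable, non-empty file as shared read-only memory and release the mapping when done. The file path is only an example.

// +build linux darwin freebsd openbsd netbsd dragonfly

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.Open("/etc/hosts") // any non-empty, readable file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	data, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(data)
	fmt.Printf("mapped %d bytes\n", len(data))
}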
							
								
								
									
38	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,38 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"os" | ||||||
|  | 	"reflect" | ||||||
|  | 	"syscall" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type mmf []byte | ||||||
|  | 
 | ||||||
|  | func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { | ||||||
|  | 	prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
 | ||||||
|  | 	if writable { | ||||||
|  | 		prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) | ||||||
|  | 	} | ||||||
|  | 	hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil) | ||||||
|  | 	if hMMF == 0 { | ||||||
|  | 		return nil, os.NewSyscallError("CreateFileMapping", errno) | ||||||
|  | 	} | ||||||
|  | 	defer syscall.CloseHandle(hMMF) | ||||||
|  | 	addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) | ||||||
|  | 	if addr == 0 { | ||||||
|  | 		return nil, os.NewSyscallError("MapViewOfFile", errno) | ||||||
|  | 	} | ||||||
|  | 	m := mmf{} | ||||||
|  | 	h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) | ||||||
|  | 	h.Data = addr | ||||||
|  | 	h.Len = length | ||||||
|  | 	h.Cap = h.Len | ||||||
|  | 	return m, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m *mmf) unmap() { | ||||||
|  | 	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) | ||||||
|  | 	*m = mmf{} | ||||||
|  | 	err := syscall.UnmapViewOfFile(addr) | ||||||
|  | 	if err != nil { | ||||||
|  | 		panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") | ||||||
|  | 	} | ||||||
|  | } | ||||||
							
								
								
									
46	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,46 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
 | ||||||
|  | type PipelineOptions struct { | ||||||
|  | 	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
 | ||||||
|  | 	Log pipeline.LogOptions | ||||||
|  | 
 | ||||||
|  | 	// Retry configures the built-in retry policy behavior.
 | ||||||
|  | 	Retry RetryOptions | ||||||
|  | 
 | ||||||
|  | 	// RequestLog configures the built-in request logging policy.
 | ||||||
|  | 	RequestLog RequestLogOptions | ||||||
|  | 
 | ||||||
|  | 	// Telemetry configures the built-in telemetry policy behavior.
 | ||||||
|  | 	Telemetry TelemetryOptions | ||||||
|  | 
 | ||||||
|  | 	// HTTPSender configures the sender of HTTP requests
 | ||||||
|  | 	HTTPSender pipeline.Factory | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewPipeline creates a Pipeline using the specified credentials and options.
 | ||||||
|  | func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline { | ||||||
|  | 	// Closest to API goes first; closest to the wire goes last
 | ||||||
|  | 	f := []pipeline.Factory{ | ||||||
|  | 		NewTelemetryPolicyFactory(o.Telemetry), | ||||||
|  | 		NewUniqueRequestIDPolicyFactory(), | ||||||
|  | 		NewRetryPolicyFactory(o.Retry), | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok { | ||||||
|  | 		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
 | ||||||
|  | 		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
 | ||||||
|  | 		// changes made by other factories (like UniqueRequestIDPolicyFactory)
 | ||||||
|  | 		f = append(f, c) | ||||||
|  | 	} | ||||||
|  | 	f = append(f, | ||||||
|  | 		NewRequestLogPolicyFactory(o.RequestLog), | ||||||
|  | 		pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log}) | ||||||
|  | } | ||||||
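A sketch of configuring NewPipeline with explicit options; the option values below are illustrative, not recommendations, and the telemetry string is a placeholder.

package main

import (
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
		Retry: azblob.RetryOptions{
			Policy:        azblob.RetryPolicyExponential,
			MaxTries:      5,
			TryTimeout:    2 * time.Minute,
			RetryDelay:    4 * time.Second,
			MaxRetryDelay: 2 * time.Minute,
		},
		RequestLog: azblob.RequestLogOptions{
			LogWarningIfTryOverThreshold: 5 * time.Second,
		},
		Telemetry: azblob.TelemetryOptions{Value: "myapp/0.1"},
	})
	_ = p // use p with NewServiceURL / NewContainerURL / NewBlobURL
}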
							
								
								
									
182	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,182 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"fmt" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"runtime" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // RequestLogOptions configures the request logging policy's behavior. | ||||||
|  | type RequestLogOptions struct { | ||||||
|  | 	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
 | ||||||
|  | 	// duration (-1=no logging; 0=default threshold).
 | ||||||
|  | 	LogWarningIfTryOverThreshold time.Duration | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o RequestLogOptions) defaults() RequestLogOptions { | ||||||
|  | 	if o.LogWarningIfTryOverThreshold == 0 { | ||||||
|  | 		// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
 | ||||||
|  | 		// But this monitors the time to get the HTTP response; NOT the time to download the response body.
 | ||||||
|  | 		o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
 | ||||||
|  | 	} | ||||||
|  | 	return o | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
 | ||||||
|  | func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory { | ||||||
|  | 	o = o.defaults() // Force defaults to be calculated
 | ||||||
|  | 	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { | ||||||
|  | 		// These variables are per-policy; shared by multiple calls to Do
 | ||||||
|  | 		var try int32 | ||||||
|  | 		operationStart := time.Now() // If this is the 1st try, record the operation start time | ||||||
|  | 		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { | ||||||
|  | 			try++ // The first try is #1 (not #0)
 | ||||||
|  | 
 | ||||||
|  | 			// Log the outgoing request as informational
 | ||||||
|  | 			if po.ShouldLog(pipeline.LogInfo) { | ||||||
|  | 				b := &bytes.Buffer{} | ||||||
|  | 				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try) | ||||||
|  | 				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil) | ||||||
|  | 				po.Log(pipeline.LogInfo, b.String()) | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			// Set the time for this particular retry operation and then Do the operation.
 | ||||||
|  | 			tryStart := time.Now() | ||||||
|  | 			response, err = next.Do(ctx, request) // Make the request
 | ||||||
|  | 			tryEnd := time.Now() | ||||||
|  | 			tryDuration := tryEnd.Sub(tryStart) | ||||||
|  | 			opDuration := tryEnd.Sub(operationStart) | ||||||
|  | 
 | ||||||
|  | 			logLevel, forceLog := pipeline.LogInfo, false // Default logging information
 | ||||||
|  | 
 | ||||||
|  | 			// If the response took too long, we'll upgrade to warning.
 | ||||||
|  | 			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { | ||||||
|  | 				// Log a warning if the try duration exceeded the specified threshold
 | ||||||
|  | 				logLevel, forceLog = pipeline.LogWarning, true | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if err == nil { // We got a response from the service
 | ||||||
|  | 				sc := response.Response().StatusCode | ||||||
|  | 				if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { | ||||||
|  | 					logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those excluded above) or any 5xx | ||||||
|  | 				} else { | ||||||
|  | 					// For other status codes, we leave the level as is.
 | ||||||
|  | 				} | ||||||
|  | 			} else { // This error did not get an HTTP response from the service; upgrade the severity to Error
 | ||||||
|  | 				logLevel, forceLog = pipeline.LogError, true | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { | ||||||
|  | 				// We're going to log this; build the string to log
 | ||||||
|  | 				b := &bytes.Buffer{} | ||||||
|  | 				slow := "" | ||||||
|  | 				if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { | ||||||
|  | 					slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) | ||||||
|  | 				} | ||||||
|  | 				fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) | ||||||
|  | 				if err != nil { // This HTTP request did not get a response from the service
 | ||||||
|  | 					fmt.Fprint(b, "REQUEST ERROR\n") | ||||||
|  | 				} else { | ||||||
|  | 					if logLevel == pipeline.LogError { | ||||||
|  | 						fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") | ||||||
|  | 					} else { | ||||||
|  | 						fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") | ||||||
|  | 					} | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) | ||||||
|  | 				if logLevel <= pipeline.LogError { | ||||||
|  | 					b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
 | ||||||
|  | 				} | ||||||
|  | 				msg := b.String() | ||||||
|  | 
 | ||||||
|  | 				if forceLog { | ||||||
|  | 					pipeline.ForceLog(logLevel, msg) | ||||||
|  | 				} | ||||||
|  | 				if shouldLog { | ||||||
|  | 					po.Log(logLevel, msg) | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			return response, err | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
 | ||||||
|  | func RedactSigQueryParam(rawQuery string) (bool, string) { | ||||||
|  | 	rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
 | ||||||
|  | 	sigFound := strings.Contains(rawQuery, "?sig=") | ||||||
|  | 	if !sigFound { | ||||||
|  | 		sigFound = strings.Contains(rawQuery, "&sig=") | ||||||
|  | 		if !sigFound { | ||||||
|  | 			return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
 | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// [?|&]sig= found, redact its value
 | ||||||
|  | 	values, _ := url.ParseQuery(rawQuery) | ||||||
|  | 	for name := range values { | ||||||
|  | 		if strings.EqualFold(name, "sig") { | ||||||
|  | 			values[name] = []string{"REDACTED"} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return sigFound, values.Encode() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func prepareRequestForLogging(request pipeline.Request) *http.Request { | ||||||
|  | 	req := request | ||||||
|  | 	if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { | ||||||
|  | 		// Make copy so we don't destroy the query parameters we actually need to send in the request
 | ||||||
|  | 		req = request.Copy() | ||||||
|  | 		req.Request.URL.RawQuery = rawQuery | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return prepareRequestForServiceLogging(req) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func stack() []byte { | ||||||
|  | 	buf := make([]byte, 1024) | ||||||
|  | 	for { | ||||||
|  | 		n := runtime.Stack(buf, false) | ||||||
|  | 		if n < len(buf) { | ||||||
|  | 			return buf[:n] | ||||||
|  | 		} | ||||||
|  | 		buf = make([]byte, 2*len(buf)) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | // Redact phase useful for blob and file service only. For other services,
 | ||||||
|  | // this method can directly return request.Request.
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { | ||||||
|  | 	req := request | ||||||
|  | 	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { | ||||||
|  | 		req = request.Copy() | ||||||
|  | 		url, err := url.Parse(req.Header.Get(key)) | ||||||
|  | 		if err == nil { | ||||||
|  | 			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound { | ||||||
|  | 				url.RawQuery = rawQuery | ||||||
|  | 				req.Header.Set(xMsCopySourceHeader, url.String()) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return req.Request | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const xMsCopySourceHeader = "x-ms-copy-source" | ||||||
|  | 
 | ||||||
|  | func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { | ||||||
|  | 	for keyInHeader := range header { | ||||||
|  | 		if strings.EqualFold(keyInHeader, key) { | ||||||
|  | 			return true, keyInHeader | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false, "" | ||||||
|  | } | ||||||
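A quick check of the redaction helper above: the sig value of a SAS query string is replaced before the URL is logged. The query string below is fabricated; note the helper lowercases the query and re-encodes it with sorted parameter names.

package main

import (
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	found, query := azblob.RedactSigQueryParam("sv=2018-11-09&sp=r&sig=SECRETSIGNATURE")
	fmt.Println(found, query) // true sig=REDACTED&sp=r&sv=2018-11-09
}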
							
								
								
									
412	vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go	generated	vendored	Normal file
								
							|  | @ -0,0 +1,412 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"math/rand" | ||||||
|  | 	"net" | ||||||
|  | 	"net/http" | ||||||
|  | 	"strconv" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
 | ||||||
|  | type RetryPolicy int32 | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
 | ||||||
|  | 	RetryPolicyExponential RetryPolicy = 0 | ||||||
|  | 
 | ||||||
|  | 	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
 | ||||||
|  | 	RetryPolicyFixed RetryPolicy = 1 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // RetryOptions configures the retry policy's behavior.
 | ||||||
|  | type RetryOptions struct { | ||||||
|  | 	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants. | ||||||
|  | 	// A value of zero means that you accept our default policy.
 | ||||||
|  | 	Policy RetryPolicy | ||||||
|  | 
 | ||||||
|  | 	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
 | ||||||
|  | 	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
 | ||||||
|  | 	MaxTries int32 | ||||||
|  | 
 | ||||||
|  | 	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
 | ||||||
|  | 	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
 | ||||||
|  | 	// of data, the default TryTimeout will probably not be sufficient. You should override this value
 | ||||||
|  | 	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
 | ||||||
|  | 	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
 | ||||||
|  | 	TryTimeout time.Duration | ||||||
|  | 
 | ||||||
|  | 	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
 | ||||||
|  | 	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
 | ||||||
|  | 	// with each retry up to a maximum specified by MaxRetryDelay.
 | ||||||
|  | 	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
 | ||||||
|  | 	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
 | ||||||
|  | 	// equal to or greater than RetryDelay.
 | ||||||
|  | 	RetryDelay time.Duration | ||||||
|  | 
 | ||||||
|  | 	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
 | ||||||
|  | 	// If you specify 0, then you must also specify 0 for RetryDelay.
 | ||||||
|  | 	MaxRetryDelay time.Duration | ||||||
|  | 
 | ||||||
|  | 	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
 | ||||||
|  | 	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
 | ||||||
|  | 	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
 | ||||||
|  | 	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
 | ||||||
|  | 	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o RetryOptions) retryReadsFromSecondaryHost() string { | ||||||
|  | 	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
 | ||||||
|  | 	//return "" // This is for non-blob SDKs
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o RetryOptions) defaults() RetryOptions { | ||||||
|  | 	// We assume the following:
 | ||||||
|  | 	// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
 | ||||||
|  | 	// 2. o.MaxTries >= 0
 | ||||||
|  | 	// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
 | ||||||
|  | 	// 4. o.RetryDelay <= o.MaxRetryDelay
 | ||||||
|  | 	// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
 | ||||||
|  | 
 | ||||||
|  | 	IfDefault := func(current *time.Duration, desired time.Duration) { | ||||||
|  | 		if *current == time.Duration(0) { | ||||||
|  | 			*current = desired | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Set defaults if unspecified
 | ||||||
|  | 	if o.MaxTries == 0 { | ||||||
|  | 		o.MaxTries = 4 | ||||||
|  | 	} | ||||||
|  | 	switch o.Policy { | ||||||
|  | 	case RetryPolicyExponential: | ||||||
|  | 		IfDefault(&o.TryTimeout, 1*time.Minute) | ||||||
|  | 		IfDefault(&o.RetryDelay, 4*time.Second) | ||||||
|  | 		IfDefault(&o.MaxRetryDelay, 120*time.Second) | ||||||
|  | 
 | ||||||
|  | 	case RetryPolicyFixed: | ||||||
|  | 		IfDefault(&o.TryTimeout, 1*time.Minute) | ||||||
|  | 		IfDefault(&o.RetryDelay, 30*time.Second) | ||||||
|  | 		IfDefault(&o.MaxRetryDelay, 120*time.Second) | ||||||
|  | 	} | ||||||
|  | 	return o | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
 | ||||||
|  | 	pow := func(number int64, exponent int32) int64 { // pow is a nested helper function | ||||||
|  | 		var result int64 = 1 | ||||||
|  | 		for n := int32(0); n < exponent; n++ { | ||||||
|  | 			result *= number | ||||||
|  | 		} | ||||||
|  | 		return result | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	delay := time.Duration(0) | ||||||
|  | 	switch o.Policy { | ||||||
|  | 	case RetryPolicyExponential: | ||||||
|  | 		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay | ||||||
|  | 
 | ||||||
|  | 	case RetryPolicyFixed: | ||||||
|  | 		if try > 1 { // Any try after the 1st uses the fixed delay
 | ||||||
|  | 			delay = o.RetryDelay | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Introduce some jitter:  [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
 | ||||||
|  | 	// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
 | ||||||
|  | 	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
 | ||||||
|  | 	if delay > o.MaxRetryDelay { | ||||||
|  | 		delay = o.MaxRetryDelay | ||||||
|  | 	} | ||||||
|  | 	return delay | ||||||
|  | } | ||||||
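// A worked instance of calcDelay above (an editorial sketch, not part of this diff),
// assuming the exponential-policy defaults RetryDelay=4s and MaxRetryDelay=120s:
// before jitter, tries 1..4 wait 0s, 4s, 12s, 28s; each base value is then scaled
// by a random factor in [0.8, 1.3) and capped at MaxRetryDelay.
package main

import (
	"fmt"
	"time"
)

func main() {
	retryDelay, maxRetryDelay := 4*time.Second, 120*time.Second
	for try := int32(1); try <= 4; try++ {
		base := time.Duration((int64(1)<<uint(try-1))-1) * retryDelay // (2^(try-1) - 1) * RetryDelay
		if base > maxRetryDelay {
			base = maxRetryDelay
		}
		fmt.Printf("try %d: base delay %v\n", try, base)
	}
}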
|  | 
 | ||||||
|  | // NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
 | ||||||
|  | func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { | ||||||
|  | 	o = o.defaults() // Force defaults to be calculated
 | ||||||
|  | 	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { | ||||||
|  | 		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) { | ||||||
|  | 			// Before each try, we'll select either the primary or secondary URL.
 | ||||||
|  | 			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
 | ||||||
|  | 
 | ||||||
|  | 			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
 | ||||||
|  | 			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != "" | ||||||
|  | 
 | ||||||
|  | 			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
 | ||||||
|  | 			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
 | ||||||
|  | 			// If using a secondary:
 | ||||||
|  | 			//    Even tries go against primary; odd tries go against the secondary
 | ||||||
|  | 			//    For the primary, wait ((2 ^ primaryTries) - 1) * delay * random(0.8, 1.2) | ||||||
|  | 			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
 | ||||||
|  | 			//    When retrying against a secondary, ignore the retry count and wait (.1 second * random(0.8, 1.2))
 | ||||||
|  | 			for try := int32(1); try <= o.MaxTries; try++ { | ||||||
|  | 				logf("\n=====> Try=%d\n", try) | ||||||
|  | 
 | ||||||
|  | 				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt. | ||||||
|  | 				tryingPrimary := !considerSecondary || (try%2 == 1) | ||||||
|  | 				// Select the correct host and delay
 | ||||||
|  | 				if tryingPrimary { | ||||||
|  | 					primaryTry++ | ||||||
|  | 					delay := o.calcDelay(primaryTry) | ||||||
|  | 					logf("Primary try=%d, Delay=%v\n", primaryTry, delay) | ||||||
|  | 					time.Sleep(delay) // The 1st try returns 0 delay
 | ||||||
|  | 				} else { | ||||||
|  | 					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
 | ||||||
|  | 					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8)) | ||||||
|  | 					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay) | ||||||
|  | 					time.Sleep(delay) // Delay with some jitter before trying secondary
 | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				// Clone the original request to ensure that each try starts with the original (unmutated) request.
 | ||||||
|  | 				requestCopy := request.Copy() | ||||||
|  | 
 | ||||||
|  | 				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
 | ||||||
|  | 				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
 | ||||||
|  | 				// 1st try as for additional tries.
 | ||||||
|  | 				err = requestCopy.RewindBody() | ||||||
|  | 				if err != nil { | ||||||
|  | 					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption") | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				if !tryingPrimary { | ||||||
|  | 					requestCopy.URL.Host = o.retryReadsFromSecondaryHost() | ||||||
|  | 					requestCopy.Host = o.retryReadsFromSecondaryHost() | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				// Set the server-side timeout query parameter "timeout=[seconds]"
 | ||||||
|  | 				timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
 | ||||||
|  | 				if deadline, ok := ctx.Deadline(); ok {  // If user's ctx has a deadline, make the timeout the smaller of the two
 | ||||||
|  | 					t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
 | ||||||
|  | 					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) | ||||||
|  | 					if t < timeout { | ||||||
|  | 						timeout = t | ||||||
|  | 					} | ||||||
|  | 					if timeout < 0 { | ||||||
|  | 						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging | ||||||
|  | 					} | ||||||
|  | 					logf("TryTimeout adjusted to=%d sec\n", timeout) | ||||||
|  | 				} | ||||||
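|  | 				// Illustration (not in the original source): with TryTimeout = 60s but a caller
 | ||||||
|  | 				// deadline only 15s away, timeout becomes 15 and "timeout=16" is sent to the service.
 | ||||||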
|  | 				q := requestCopy.Request.URL.Query() | ||||||
|  | 				q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
 | ||||||
|  | 				requestCopy.Request.URL.RawQuery = q.Encode() | ||||||
|  | 				logf("Url=%s\n", requestCopy.Request.URL.String()) | ||||||
|  | 
 | ||||||
|  | 				// Set the time for this particular retry operation and then Do the operation.
 | ||||||
|  | 				tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout)) | ||||||
|  | 				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
 | ||||||
|  | 				response, err = next.Do(tryCtx, requestCopy) // Make the request
 | ||||||
|  | 				/*err = improveDeadlineExceeded(err) | ||||||
|  | 				if err == nil { | ||||||
|  | 					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body} | ||||||
|  | 				}*/ | ||||||
|  | 				logf("Err=%v, response=%v\n", err, response) | ||||||
|  | 
 | ||||||
|  | 				action := "" // This MUST get changed within the switch code below
 | ||||||
|  | 				switch { | ||||||
|  | 				case ctx.Err() != nil: | ||||||
|  | 					action = "NoRetry: Op timeout" | ||||||
|  | 				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound: | ||||||
|  | 					// If attempt was against the secondary & it returned a StatusNotFound (404), then
 | ||||||
|  | 					// the resource was not found. This may be due to replication delay. So, in this
 | ||||||
|  | 					// case, we'll never try the secondary again for this operation.
 | ||||||
|  | 					considerSecondary = false | ||||||
|  | 					action = "Retry: Secondary URL returned 404" | ||||||
|  | 				case err != nil: | ||||||
|  | 					// NOTE: the protocol Responder returns a non-nil error if the REST API returns an invalid status code for the invoked operation.
 | ||||||
|  | 					// Use ServiceCode to verify whether the error originated on the storage service side;
 | ||||||
|  | 					// ServiceCode is set only when a storage-service-related error occurred.
 | ||||||
|  | 					if stErr, ok := err.(StorageError); ok { | ||||||
|  | 						if stErr.Temporary() { | ||||||
|  | 							action = "Retry: StorageError with error service code and Temporary()" | ||||||
|  | 						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it after the protocol layer fixes the issue where a net.Error is wrapped as a StorageError
 | ||||||
|  | 							action = "Retry: StorageError with success status code" | ||||||
|  | 						} else { | ||||||
|  | 							action = "NoRetry: StorageError not Temporary() and without retriable status code" | ||||||
|  | 						} | ||||||
|  | 					} else if netErr, ok := err.(net.Error); ok { | ||||||
|  | 					// Use a list of non-retriable net.Errors rather than a retriable list,
 | ||||||
|  | 					// because some errors that do need to be retried, such as 'connection reset by peer'
 | ||||||
|  | 					// and 'transport connection broken', do not implement Temporary().
 | ||||||
|  | 					// So the SDK retries in most cases, unless the error is known to be non-retriable.
 | ||||||
|  | 						if !isNotRetriable(netErr) { | ||||||
|  | 							action = "Retry: net.Error and not in the non-retriable list" | ||||||
|  | 						} else { | ||||||
|  | 							action = "NoRetry: net.Error and in the non-retriable list" | ||||||
|  | 						} | ||||||
|  | 					} else { | ||||||
|  | 						action = "NoRetry: unrecognized error" | ||||||
|  | 					} | ||||||
|  | 				default: | ||||||
|  | 					action = "NoRetry: successful HTTP request" // no error
 | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				logf("Action=%s\n", action) | ||||||
|  | 				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
 | ||||||
|  | 				if action[0] != 'R' { // Retry only if action starts with 'R'
 | ||||||
|  | 					if err != nil { | ||||||
|  | 						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
 | ||||||
|  | 					} else { | ||||||
|  | 						// We wrap the last per-try context's cancel func in a body wrapper and overwrite the Response's Body field with it.
 | ||||||
|  | 						// So, when the user closes the Body, our per-try context gets canceled too.
 | ||||||
|  | 						// Another option is to have the last policy do this wrapping for a per-retry context (not for the user's context).
 | ||||||
|  | 						if response == nil || response.Response() == nil { | ||||||
|  | 							// We return an error when response or response.Response() is nil, as the
 | ||||||
|  | 							// response should not be nil if the request was sent and the operation executed successfully.
 | ||||||
|  | 							// Another option is to execute the cancel function when response or response.Response() is nil,
 | ||||||
|  | 							// since in that case the current per-try context has nothing left to do.
 | ||||||
|  | 							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully") | ||||||
|  | 						} | ||||||
|  | 						response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body} | ||||||
|  | 					} | ||||||
|  | 					break // Don't retry
 | ||||||
|  | 				} | ||||||
|  | 				if response != nil && response.Response() != nil && response.Response().Body != nil { | ||||||
|  | 					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
 | ||||||
|  | 					body := response.Response().Body | ||||||
|  | 					io.Copy(ioutil.Discard, body) | ||||||
|  | 					body.Close() | ||||||
|  | 				} | ||||||
|  | 				// If retrying, cancel the current per-try timeout context
 | ||||||
|  | 				tryCancel() | ||||||
|  | 			} | ||||||
|  | 			return response, err // Not retryable or too many retries; return the last response/error
 | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
 | ||||||
|  | type contextCancelReadCloser struct { | ||||||
|  | 	cf   context.CancelFunc | ||||||
|  | 	body io.ReadCloser | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { | ||||||
|  | 	return rc.body.Read(p) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (rc *contextCancelReadCloser) Close() error { | ||||||
|  | 	err := rc.body.Close() | ||||||
|  | 	if rc.cf != nil { | ||||||
|  | 		rc.cf() | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // isNotRetriable checks if the provided net.Error isn't retriable.
 | ||||||
|  | func isNotRetriable(errToParse net.Error) bool { | ||||||
|  | 	// No error, so this is NOT retriable.
 | ||||||
|  | 	if errToParse == nil { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// The error is either temporary or a timeout so it IS retriable (not not retriable).
 | ||||||
|  | 	if errToParse.Temporary() || errToParse.Timeout() { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	genericErr := error(errToParse) | ||||||
|  | 
 | ||||||
|  | 	// From here on, all errors are neither Temporary() nor Timeout().
 | ||||||
|  | 	switch err := errToParse.(type) { | ||||||
|  | 	case *net.OpError: | ||||||
|  | 		// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
 | ||||||
|  | 		if err.Err == nil { | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 		genericErr = err.Err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch genericErr.(type) { | ||||||
|  | 	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError: | ||||||
|  | 		// If the error is one of the ones listed, then it is NOT retriable.
 | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
 | ||||||
|  | 	// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
 | ||||||
|  | 	if strings.Contains(genericErr.Error(), "invalid header field") { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Assume the error is retriable.
 | ||||||
|  | 	return false | ||||||
|  | } | ||||||
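|  | // Illustration (not in the original source): anything reporting Temporary() or Timeout()
 | ||||||
|  | // is treated as retriable above, while e.g. a non-temporary *net.DNSError falls through to
 | ||||||
|  | // the type switch and is classified as non-retriable.
 | ||||||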
|  | 
 | ||||||
|  | var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} | ||||||
|  | 
 | ||||||
|  | func isSuccessStatusCode(resp *http.Response) bool { | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	for _, i := range successStatusCodes { | ||||||
|  | 		if i == resp.StatusCode { | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
 | ||||||
|  | var logf = func(format string, a ...interface{}) {} | ||||||
|  | 
 | ||||||
|  | // Use this version to see the retry method's code path (import "fmt")
 | ||||||
|  | //var logf = fmt.Printf
 | ||||||
|  | 
 | ||||||
|  | /* | ||||||
|  | type deadlineExceededReadCloser struct { | ||||||
|  | 	r io.ReadCloser | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { | ||||||
|  | 	n, err := 0, io.EOF | ||||||
|  | 	if r.r != nil { | ||||||
|  | 		n, err = r.r.Read(p) | ||||||
|  | 	} | ||||||
|  | 	return n, improveDeadlineExceeded(err) | ||||||
|  | } | ||||||
|  | func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { | ||||||
|  | 	// For an HTTP request, the ReadCloser MUST also implement seek
 | ||||||
|  | 	// For an HTTP response, Seek MUST not be called (or this will panic)
 | ||||||
|  | 	o, err := r.r.(io.Seeker).Seek(offset, whence) | ||||||
|  | 	return o, improveDeadlineExceeded(err) | ||||||
|  | } | ||||||
|  | func (r *deadlineExceededReadCloser) Close() error { | ||||||
|  | 	if c, ok := r.r.(io.Closer); ok { | ||||||
|  | 		c.Close() | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // timeoutError is the internal struct that implements our richer timeout error.
 | ||||||
|  | type deadlineExceeded struct { | ||||||
|  | 	responseError | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
 | ||||||
|  | 
 | ||||||
|  | // improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
 | ||||||
|  | func improveDeadlineExceeded(cause error) error { | ||||||
|  | 	// If cause is not DeadlineExceeded, return the same error passed in.
 | ||||||
|  | 	if cause != context.DeadlineExceeded { | ||||||
|  | 		return cause | ||||||
|  | 	} | ||||||
|  | 	// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
 | ||||||
|  | 	return &deadlineExceeded{ | ||||||
|  | 		responseError: responseError{ | ||||||
|  | 			ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error implements the error interface's Error method to return a string representation of the error.
 | ||||||
|  | func (e *deadlineExceeded) Error() string { | ||||||
|  | 	return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") | ||||||
|  | } | ||||||
|  | */ | ||||||
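
A minimal, self-contained sketch (not part of this commit) of the backoff formula documented at the top of the retry loop: ((2^n) - 1) * delay, jittered by a random factor in [0.8, 1.2) and capped at a maximum. All names here are illustrative:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff computes ((2^try) - 1) * delay, caps it at maxDelay, then applies
// the documented random(0.8, 1.2) jitter.
func backoff(try uint, delay, maxDelay time.Duration) time.Duration {
	d := time.Duration((1<<try)-1) * delay
	if d > maxDelay {
		d = maxDelay
	}
	jitter := rand.Float64()*0.4 + 0.8 // random factor in [0.8, 1.2)
	return time.Duration(float64(d) * jitter)
}

func main() {
	for try := uint(1); try <= 4; try++ {
		fmt.Printf("try %d waits ~%v\n", try, backoff(try, 2*time.Second, time.Minute))
	}
}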
							
								
								
									
51 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go generated vendored Normal file
|  | @ -0,0 +1,51 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"fmt" | ||||||
|  | 	"os" | ||||||
|  | 	"runtime" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // TelemetryOptions configures the telemetry policy's behavior.
 | ||||||
|  | type TelemetryOptions struct { | ||||||
|  | 	// Value is a string prepended to each request's User-Agent and sent to the service.
 | ||||||
|  | 	// The service records the user-agent in logs for diagnostics and tracking of client requests.
 | ||||||
|  | 	Value string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
 | ||||||
|  | // which add telemetry information to outgoing HTTP requests.
 | ||||||
|  | func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory { | ||||||
|  | 	b := &bytes.Buffer{} | ||||||
|  | 	b.WriteString(o.Value) | ||||||
|  | 	if b.Len() > 0 { | ||||||
|  | 		b.WriteRune(' ') | ||||||
|  | 	} | ||||||
|  | 	fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo) | ||||||
|  | 	telemetryValue := b.String() | ||||||
|  | 
 | ||||||
|  | 	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { | ||||||
|  | 		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 			request.Header.Set("User-Agent", telemetryValue) | ||||||
|  | 			return next.Do(ctx, request) | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NOTE: the ONLY function that should write to this variable is this func
 | ||||||
|  | var platformInfo = func() string { | ||||||
|  | 	// Azure-Storage/version (runtime; os type and version)
 | ||||||
|  | 	// Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
 | ||||||
|  | 	operatingSystem := runtime.GOOS // Default OS string
 | ||||||
|  | 	switch operatingSystem { | ||||||
|  | 	case "windows": | ||||||
|  | 		operatingSystem = os.Getenv("OS") // Get more specific OS information
 | ||||||
|  | 	case "linux": // accept default OS info
 | ||||||
|  | 	case "freebsd": //  accept default OS info
 | ||||||
|  | 	} | ||||||
|  | 	return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) | ||||||
|  | }() | ||||||
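
For orientation, a short sketch of wiring this factory into a pipeline. NewPipeline, PipelineOptions, NewAnonymousCredential, and NewContainerURL are assumed from the surrounding azblob package; they are not part of this diff:

package main

import (
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// TelemetryOptions.Value is prepended to the User-Agent of every request,
	// per the policy above.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{
		Telemetry: azblob.TelemetryOptions{Value: "myapp/1.0"},
	})

	u, err := url.Parse("https://myaccount.blob.core.windows.net/mycontainer")
	if err != nil {
		log.Fatal(err)
	}
	containerURL := azblob.NewContainerURL(*u, p)
	_ = containerURL // requests now carry "myapp/1.0 Azure-Storage/<version> (<runtime>; <os>)"
}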
							
								
								
									
24 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go generated vendored Normal file
|  | @ -0,0 +1,24 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // NewUniqueRequestIDPolicyFactory creates a factory whose policy objects set the
 | ||||||
|  | // request's x-ms-client-request-id header if it doesn't already exist.
 | ||||||
|  | func NewUniqueRequestIDPolicyFactory() pipeline.Factory { | ||||||
|  | 	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { | ||||||
|  | 		// This is Policy's Do method:
 | ||||||
|  | 		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 			id := request.Header.Get(xMsClientRequestID) | ||||||
|  | 			if id == "" { // Add a unique request ID if the caller didn't specify one already
 | ||||||
|  | 				request.Header.Set(xMsClientRequestID, newUUID().String()) | ||||||
|  | 			} | ||||||
|  | 			return next.Do(ctx, request) | ||||||
|  | 		} | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const xMsClientRequestID = "x-ms-client-request-id" | ||||||
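
The FactoryFunc pattern above generalizes to any per-request mutation. A minimal sketch (not part of this commit) of a custom policy that stamps an arbitrary header:

package custompolicy

import (
	"context"

	"github.com/Azure/azure-pipeline-go/pipeline"
)

// NewHeaderStampPolicyFactory (illustrative name) returns a factory whose
// policies set the given header on every outgoing request, mirroring the
// unique-request-ID policy above.
func NewHeaderStampPolicyFactory(key, value string) pipeline.Factory {
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
			request.Header.Set(key, value) // stamp, then forward down the pipeline
			return next.Do(ctx, request)
		}
	})
}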
							
								
								
									
178 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go generated vendored Normal file
|  | @ -0,0 +1,178 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"io" | ||||||
|  | 	"net" | ||||||
|  | 	"net/http" | ||||||
|  | 	"strings" | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const CountToEnd = 0 | ||||||
|  | 
 | ||||||
|  | // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
 | ||||||
|  | type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) | ||||||
|  | 
 | ||||||
|  | // HTTPGetterInfo is passed to an HTTPGetter function, carrying the parameters
 | ||||||
|  | // that should be used to make an HTTP GET request.
 | ||||||
|  | type HTTPGetterInfo struct { | ||||||
|  | 	// Offset specifies the start offset that should be used when
 | ||||||
|  | 	// creating the HTTP GET request's Range header
 | ||||||
|  | 	Offset int64 | ||||||
|  | 
 | ||||||
|  | 	// Count specifies the count of bytes that should be used to calculate
 | ||||||
|  | 	// the end offset when creating the HTTP GET request's Range header
 | ||||||
|  | 	Count int64 | ||||||
|  | 
 | ||||||
|  | 	// ETag specifies the resource's etag that should be used when creating
 | ||||||
|  | 	// the HTTP GET request's If-Match header
 | ||||||
|  | 	ETag ETag | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FailedReadNotifier is a function type that represents the notification function called when a read fails
 | ||||||
|  | type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) | ||||||
|  | 
 | ||||||
|  | // RetryReaderOptions contains properties which help decide when to retry.
 | ||||||
|  | type RetryReaderOptions struct { | ||||||
|  | 	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
 | ||||||
|  | 	// while reading from a RetryReader. A value of zero means that no additional HTTP
 | ||||||
|  | 	// GET requests will be made.
 | ||||||
|  | 	MaxRetryRequests   int | ||||||
|  | 	doInjectError      bool | ||||||
|  | 	doInjectErrorRound int | ||||||
|  | 
 | ||||||
|  | 	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
 | ||||||
|  | 	NotifyFailedRead FailedReadNotifier | ||||||
|  | 
 | ||||||
|  | 	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
 | ||||||
|  | 	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
 | ||||||
|  | 	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
 | ||||||
|  | 	// read is too slow, caller may want to force a retry in the hope that the retry will be quicker).  If
 | ||||||
|  | 	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
 | ||||||
|  | 	// treated as a fatal (non-retryable) error.
 | ||||||
|  | 	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
 | ||||||
|  | 	// from the same "thread" (goroutine) as Read.  Concurrent Close calls from other goroutines may instead produce network errors
 | ||||||
|  | 	// which will be retried.
 | ||||||
|  | 	TreatEarlyCloseAsError bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // retryReader implements the io.ReadCloser interface.
 | ||||||
|  | // retryReader tries to read from the response, and if a retriable network error is
 | ||||||
|  | // returned while reading, it retries (per the retry reader options) by executing the
 | ||||||
|  | // user-defined getter to obtain a new response, and continues the overall reading
 | ||||||
|  | // process by reading from that new response.
 | ||||||
|  | type retryReader struct { | ||||||
|  | 	ctx             context.Context | ||||||
|  | 	info            HTTPGetterInfo | ||||||
|  | 	countWasBounded bool | ||||||
|  | 	o               RetryReaderOptions | ||||||
|  | 	getter          HTTPGetter | ||||||
|  | 
 | ||||||
|  | 	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
 | ||||||
|  | 	responseMu *sync.Mutex | ||||||
|  | 	response   *http.Response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewRetryReader creates a retry reader.
 | ||||||
|  | func NewRetryReader(ctx context.Context, initialResponse *http.Response, | ||||||
|  | 	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { | ||||||
|  | 	return &retryReader{ | ||||||
|  | 		ctx:             ctx, | ||||||
|  | 		getter:          getter, | ||||||
|  | 		info:            info, | ||||||
|  | 		countWasBounded: info.Count != CountToEnd, | ||||||
|  | 		response:        initialResponse, | ||||||
|  | 		responseMu:      &sync.Mutex{}, | ||||||
|  | 		o:               o} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *retryReader) setResponse(r *http.Response) { | ||||||
|  | 	s.responseMu.Lock() | ||||||
|  | 	defer s.responseMu.Unlock() | ||||||
|  | 	s.response = r | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *retryReader) Read(p []byte) (n int, err error) { | ||||||
|  | 	for try := 0; ; try++ { | ||||||
|  | 		//fmt.Println(try)       // Uncomment to trace retries while debugging.
 | ||||||
|  | 		if s.countWasBounded && s.info.Count == CountToEnd { | ||||||
|  | 			// User specified an original count and the remaining bytes are 0, return 0, EOF
 | ||||||
|  | 			return 0, io.EOF | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		s.responseMu.Lock() | ||||||
|  | 		resp := s.response | ||||||
|  | 		s.responseMu.Unlock() | ||||||
|  | 		if resp == nil { // We don't have a response stream to read from, try to get one.
 | ||||||
|  | 			newResponse, err := s.getter(s.ctx, s.info) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return 0, err | ||||||
|  | 			} | ||||||
|  | 			// Successful GET; this is the network stream we'll read from.
 | ||||||
|  | 			s.setResponse(newResponse) | ||||||
|  | 			resp = newResponse | ||||||
|  | 		} | ||||||
|  | 		n, err := resp.Body.Read(p) // Read from the stream (this will return a non-nil err if Close is called, from another goroutine, while it is running)
 | ||||||
|  | 
 | ||||||
|  | 		// Injection mechanism for testing.
 | ||||||
|  | 		if s.o.doInjectError && try == s.o.doInjectErrorRound { | ||||||
|  | 			err = &net.DNSError{IsTemporary: true} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// We successfully read data or reached EOF.
 | ||||||
|  | 		if err == nil || err == io.EOF { | ||||||
|  | 			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
 | ||||||
|  | 			if s.info.Count != CountToEnd { | ||||||
|  | 				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
 | ||||||
|  | 			} | ||||||
|  | 			return n, err // Return the result to the caller
 | ||||||
|  | 		} | ||||||
|  | 		s.Close()          // Error, close stream
 | ||||||
|  | 		s.setResponse(nil) // Our stream is no longer good
 | ||||||
|  | 
 | ||||||
|  | 		// Check the retry count and error code, and decide whether to retry.
 | ||||||
|  | 		retriesExhausted := try >= s.o.MaxRetryRequests | ||||||
|  | 		_, isNetError := err.(net.Error) | ||||||
|  | 		willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted | ||||||
|  | 
 | ||||||
|  | 		// Notify, for logging purposes, of any failures
 | ||||||
|  | 		if s.o.NotifyFailedRead != nil { | ||||||
|  | 			failureCount := try + 1 // because try is zero-based
 | ||||||
|  | 			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if willRetry { | ||||||
|  | 			continue | ||||||
|  | 			// Loop around and try to get and read from new stream.
 | ||||||
|  | 		} | ||||||
|  | 		return n, err // Not retryable, or retries exhausted, so just return
 | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry
 | ||||||
|  | // Is this safe, to close early from another goroutine?  Early close ultimately ends up calling
 | ||||||
|  | // net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
 | ||||||
|  | // which is exactly the behaviour we want.
 | ||||||
|  | // NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read)
 | ||||||
|  | // then there are two different types of error that may happen - either the one we check for here,
 | ||||||
|  | // or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
 | ||||||
|  | // to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
 | ||||||
|  | func (s *retryReader) wasRetryableEarlyClose(err error) bool { | ||||||
|  | 	if s.o.TreatEarlyCloseAsError { | ||||||
|  | 		return false // user wants all early closes to be errors, and so not retryable
 | ||||||
|  | 	} | ||||||
|  | 	// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
 | ||||||
|  | 	return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | const ReadOnClosedBodyMessage = "read on closed response body" | ||||||
|  | 
 | ||||||
|  | func (s *retryReader) Close() error { | ||||||
|  | 	s.responseMu.Lock() | ||||||
|  | 	defer s.responseMu.Unlock() | ||||||
|  | 	if s.response != nil && s.response.Body != nil { | ||||||
|  | 		return s.response.Body.Close() | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
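
In practice this reader is obtained from a download response rather than built by hand. A sketch, assuming the usual azblob entry points (NewBlobURL, Download, and DownloadResponse.Body, none of which appear in this diff):

package main

import (
	"context"
	"io/ioutil"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	u, err := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
	if err != nil {
		log.Fatal(err)
	}
	blobURL := azblob.NewBlobURL(*u, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}))

	resp, err := blobURL.Download(context.Background(), 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
	if err != nil {
		log.Fatal(err)
	}
	// Body wraps the network stream in the retryReader above: up to 3 extra
	// ranged GETs are issued transparently if the stream breaks mid-read.
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()

	data, err := ioutil.ReadAll(body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(data))
}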
							
								
								
									
219 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go generated vendored Normal file
|  | @ -0,0 +1,219 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
 | ||||||
|  | type AccountSASSignatureValues struct { | ||||||
|  | 	Version       string      `param:"sv"`  // If not specified, this defaults to SASVersion
 | ||||||
|  | 	Protocol      SASProtocol `param:"spr"` // See the SASProtocol* constants
 | ||||||
|  | 	StartTime     time.Time   `param:"st"`  // Not specified if IsZero
 | ||||||
|  | 	ExpiryTime    time.Time   `param:"se"`  // Not specified if IsZero
 | ||||||
|  | 	Permissions   string      `param:"sp"`  // Create by initializing an AccountSASPermissions and then call String()
 | ||||||
|  | 	IPRange       IPRange     `param:"sip"` | ||||||
|  | 	Services      string      `param:"ss"`  // Create by initializing AccountSASServices and then call String()
 | ||||||
|  | 	ResourceTypes string      `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String()
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce
 | ||||||
|  | // the proper SAS query parameters.
 | ||||||
|  | func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { | ||||||
|  | 	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
 | ||||||
|  | 	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { | ||||||
|  | 		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") | ||||||
|  | 	} | ||||||
|  | 	if v.Version == "" { | ||||||
|  | 		v.Version = SASVersion | ||||||
|  | 	} | ||||||
|  | 	perms := &AccountSASPermissions{} | ||||||
|  | 	if err := perms.Parse(v.Permissions); err != nil { | ||||||
|  | 		return SASQueryParameters{}, err | ||||||
|  | 	} | ||||||
|  | 	v.Permissions = perms.String() | ||||||
|  | 
 | ||||||
|  | 	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{}) | ||||||
|  | 
 | ||||||
|  | 	stringToSign := strings.Join([]string{ | ||||||
|  | 		sharedKeyCredential.AccountName(), | ||||||
|  | 		v.Permissions, | ||||||
|  | 		v.Services, | ||||||
|  | 		v.ResourceTypes, | ||||||
|  | 		startTime, | ||||||
|  | 		expiryTime, | ||||||
|  | 		v.IPRange.String(), | ||||||
|  | 		string(v.Protocol), | ||||||
|  | 		v.Version, | ||||||
|  | 		""}, // That's right, the account SAS requires a terminating extra newline
 | ||||||
|  | 		"\n") | ||||||
|  | 
 | ||||||
|  | 	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign) | ||||||
|  | 	p := SASQueryParameters{ | ||||||
|  | 		// Common SAS parameters
 | ||||||
|  | 		version:     v.Version, | ||||||
|  | 		protocol:    v.Protocol, | ||||||
|  | 		startTime:   v.StartTime, | ||||||
|  | 		expiryTime:  v.ExpiryTime, | ||||||
|  | 		permissions: v.Permissions, | ||||||
|  | 		ipRange:     v.IPRange, | ||||||
|  | 
 | ||||||
|  | 		// Account-specific SAS parameters
 | ||||||
|  | 		services:      v.Services, | ||||||
|  | 		resourceTypes: v.ResourceTypes, | ||||||
|  | 
 | ||||||
|  | 		// Calculated SAS signature
 | ||||||
|  | 		signature: signature, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return p, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
 | ||||||
|  | type AccountSASPermissions struct { | ||||||
|  | 	Read, Write, Delete, List, Add, Create, Update, Process bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String produces the SAS permissions string for an Azure Storage account.
 | ||||||
|  | // Call this method to set AccountSASSignatureValues's Permissions field.
 | ||||||
|  | func (p AccountSASPermissions) String() string { | ||||||
|  | 	var buffer bytes.Buffer | ||||||
|  | 	if p.Read { | ||||||
|  | 		buffer.WriteRune('r') | ||||||
|  | 	} | ||||||
|  | 	if p.Write { | ||||||
|  | 		buffer.WriteRune('w') | ||||||
|  | 	} | ||||||
|  | 	if p.Delete { | ||||||
|  | 		buffer.WriteRune('d') | ||||||
|  | 	} | ||||||
|  | 	if p.List { | ||||||
|  | 		buffer.WriteRune('l') | ||||||
|  | 	} | ||||||
|  | 	if p.Add { | ||||||
|  | 		buffer.WriteRune('a') | ||||||
|  | 	} | ||||||
|  | 	if p.Create { | ||||||
|  | 		buffer.WriteRune('c') | ||||||
|  | 	} | ||||||
|  | 	if p.Update { | ||||||
|  | 		buffer.WriteRune('u') | ||||||
|  | 	} | ||||||
|  | 	if p.Process { | ||||||
|  | 		buffer.WriteRune('p') | ||||||
|  | 	} | ||||||
|  | 	return buffer.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the AccountSASPermissions's fields from a string.
 | ||||||
|  | func (p *AccountSASPermissions) Parse(s string) error { | ||||||
|  | 	*p = AccountSASPermissions{} // Clear out the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 'r': | ||||||
|  | 			p.Read = true | ||||||
|  | 		case 'w': | ||||||
|  | 			p.Write = true | ||||||
|  | 		case 'd': | ||||||
|  | 			p.Delete = true | ||||||
|  | 		case 'l': | ||||||
|  | 			p.List = true | ||||||
|  | 		case 'a': | ||||||
|  | 			p.Add = true | ||||||
|  | 		case 'c': | ||||||
|  | 			p.Create = true | ||||||
|  | 		case 'u': | ||||||
|  | 			p.Update = true | ||||||
|  | 		case 'p': | ||||||
|  | 			p.Process = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("Invalid permission character: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
 | ||||||
|  | type AccountSASServices struct { | ||||||
|  | 	Blob, Queue, File bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String produces the SAS services string for an Azure Storage account.
 | ||||||
|  | // Call this method to set AccountSASSignatureValues's Services field.
 | ||||||
|  | func (s AccountSASServices) String() string { | ||||||
|  | 	var buffer bytes.Buffer | ||||||
|  | 	if s.Blob { | ||||||
|  | 		buffer.WriteRune('b') | ||||||
|  | 	} | ||||||
|  | 	if s.Queue { | ||||||
|  | 		buffer.WriteRune('q') | ||||||
|  | 	} | ||||||
|  | 	if s.File { | ||||||
|  | 		buffer.WriteRune('f') | ||||||
|  | 	} | ||||||
|  | 	return buffer.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the AccountSASServices' fields from a string.
 | ||||||
|  | func (a *AccountSASServices) Parse(s string) error { | ||||||
|  | 	*a = AccountSASServices{} // Clear out the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 'b': | ||||||
|  | 			a.Blob = true | ||||||
|  | 		case 'q': | ||||||
|  | 			a.Queue = true | ||||||
|  | 		case 'f': | ||||||
|  | 			a.File = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("Invalid service character: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
 | ||||||
|  | // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
 | ||||||
|  | type AccountSASResourceTypes struct { | ||||||
|  | 	Service, Container, Object bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String produces the SAS resource types string for an Azure Storage account.
 | ||||||
|  | // Call this method to set AccountSASSignatureValues's ResourceTypes field.
 | ||||||
|  | func (rt AccountSASResourceTypes) String() string { | ||||||
|  | 	var buffer bytes.Buffer | ||||||
|  | 	if rt.Service { | ||||||
|  | 		buffer.WriteRune('s') | ||||||
|  | 	} | ||||||
|  | 	if rt.Container { | ||||||
|  | 		buffer.WriteRune('c') | ||||||
|  | 	} | ||||||
|  | 	if rt.Object { | ||||||
|  | 		buffer.WriteRune('o') | ||||||
|  | 	} | ||||||
|  | 	return buffer.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Parse initializes the AccountSASResourceTypes' fields from a string.
 | ||||||
|  | func (rt *AccountSASResourceTypes) Parse(s string) error { | ||||||
|  | 	*rt = AccountSASResourceTypes{} // Clear out the flags
 | ||||||
|  | 	for _, r := range s { | ||||||
|  | 		switch r { | ||||||
|  | 		case 's': | ||||||
|  | 			rt.Service = true | ||||||
|  | 		case 'c': | ||||||
|  | 			rt.Container = true | ||||||
|  | 		case 'o': | ||||||
|  | 			rt.Object = true | ||||||
|  | 		default: | ||||||
|  | 			return fmt.Errorf("Invalid resource type: '%v'", r) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
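
Putting the pieces of this file together: a sketch that builds an account SAS query string. NewSharedKeyCredential, and its (value, error) return shape, are assumptions about the surrounding package, not shown in this diff:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account name/key; the key must be valid base64.
	cred, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}

	sas, err := azblob.AccountSASSignatureValues{
		Protocol:      azblob.SASProtocolHTTPS,
		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
		Permissions:   azblob.AccountSASPermissions{Read: true, List: true}.String(),
		Services:      azblob.AccountSASServices{Blob: true}.String(),
		ResourceTypes: azblob.AccountSASResourceTypes{Container: true, Object: true}.String(),
	}.NewSASQueryParameters(cred)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sas.Encode()) // ready to append to a resource URL as its query string
}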
							
								
								
									
322 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go generated vendored Normal file
|  | @ -0,0 +1,322 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"net" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strings" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // SASVersion indicates the SAS version.
 | ||||||
|  | const SASVersion = ServiceVersion | ||||||
|  | 
 | ||||||
|  | type SASProtocol string | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// SASProtocolHTTPS can be specified for a SAS protocol
 | ||||||
|  | 	SASProtocolHTTPS SASProtocol = "https" | ||||||
|  | 
 | ||||||
|  | 	// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
 | ||||||
|  | 	SASProtocolHTTPSandHTTP SASProtocol = "https,http" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
 | ||||||
|  | // SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
 | ||||||
|  | func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { | ||||||
|  | 	ss := "" | ||||||
|  | 	if !startTime.IsZero() { | ||||||
|  | 		ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
 | ||||||
|  | 	} | ||||||
|  | 	se := "" | ||||||
|  | 	if !expiryTime.IsZero() { | ||||||
|  | 		se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ"
 | ||||||
|  | 	} | ||||||
|  | 	sh := "" | ||||||
|  | 	if !snapshotTime.IsZero() { | ||||||
|  | 		sh = snapshotTime.Format(SnapshotTimeFormat) | ||||||
|  | 	} | ||||||
|  | 	return ss, se, sh | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
 | ||||||
|  | const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
 | ||||||
|  | 
 | ||||||
|  | // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
 | ||||||
|  | 
 | ||||||
|  | // A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
 | ||||||
|  | // You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
 | ||||||
|  | // to a query parameter map by calling AddToValues().
 | ||||||
|  | // NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
 | ||||||
|  | //
 | ||||||
|  | // This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
 | ||||||
|  | type SASQueryParameters struct { | ||||||
|  | 	// All members are immutable or values so copies of this struct are goroutine-safe.
 | ||||||
|  | 	version            string      `param:"sv"` | ||||||
|  | 	services           string      `param:"ss"` | ||||||
|  | 	resourceTypes      string      `param:"srt"` | ||||||
|  | 	protocol           SASProtocol `param:"spr"` | ||||||
|  | 	startTime          time.Time   `param:"st"` | ||||||
|  | 	expiryTime         time.Time   `param:"se"` | ||||||
|  | 	snapshotTime       time.Time   `param:"snapshot"` | ||||||
|  | 	ipRange            IPRange     `param:"sip"` | ||||||
|  | 	identifier         string      `param:"si"` | ||||||
|  | 	resource           string      `param:"sr"` | ||||||
|  | 	permissions        string      `param:"sp"` | ||||||
|  | 	signature          string      `param:"sig"` | ||||||
|  | 	cacheControl       string      `param:"rscc"` | ||||||
|  | 	contentDisposition string      `param:"rscd"` | ||||||
|  | 	contentEncoding    string      `param:"rsce"` | ||||||
|  | 	contentLanguage    string      `param:"rscl"` | ||||||
|  | 	contentType        string      `param:"rsct"` | ||||||
|  | 	signedOid          string      `param:"skoid"` | ||||||
|  | 	signedTid          string      `param:"sktid"` | ||||||
|  | 	signedStart        time.Time   `param:"skt"` | ||||||
|  | 	signedExpiry       time.Time   `param:"ske"` | ||||||
|  | 	signedService      string      `param:"sks"` | ||||||
|  | 	signedVersion      string      `param:"skv"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedOid() string { | ||||||
|  | 	return p.signedOid | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedTid() string { | ||||||
|  | 	return p.signedTid | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedStart() time.Time { | ||||||
|  | 	return p.signedStart | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedExpiry() time.Time { | ||||||
|  | 	return p.signedExpiry | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedService() string { | ||||||
|  | 	return p.signedService | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SignedVersion() string { | ||||||
|  | 	return p.signedVersion | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) SnapshotTime() time.Time { | ||||||
|  | 	return p.snapshotTime | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) Version() string { | ||||||
|  | 	return p.version | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) Services() string { | ||||||
|  | 	return p.services | ||||||
|  | } | ||||||
|  | func (p *SASQueryParameters) ResourceTypes() string { | ||||||
|  | 	return p.resourceTypes | ||||||
|  | } | ||||||
|  | func (p *SASQueryParameters) Protocol() SASProtocol { | ||||||
|  | 	return p.protocol | ||||||
|  | } | ||||||
|  | func (p *SASQueryParameters) StartTime() time.Time { | ||||||
|  | 	return p.startTime | ||||||
|  | } | ||||||
|  | func (p *SASQueryParameters) ExpiryTime() time.Time { | ||||||
|  | 	return p.expiryTime | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) IPRange() IPRange { | ||||||
|  | 	return p.ipRange | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) Identifier() string { | ||||||
|  | 	return p.identifier | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) Resource() string { | ||||||
|  | 	return p.resource | ||||||
|  | } | ||||||
|  | func (p *SASQueryParameters) Permissions() string { | ||||||
|  | 	return p.permissions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) Signature() string { | ||||||
|  | 	return p.signature | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) CacheControl() string { | ||||||
|  | 	return p.cacheControl | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) ContentDisposition() string { | ||||||
|  | 	return p.contentDisposition | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) ContentEncoding() string { | ||||||
|  | 	return p.contentEncoding | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) ContentLanguage() string { | ||||||
|  | 	return p.contentLanguage | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *SASQueryParameters) ContentType() string { | ||||||
|  | 	return p.contentType | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // IPRange represents a SAS IP range's start IP and (optionally) end IP.
 | ||||||
|  | type IPRange struct { | ||||||
|  | 	Start net.IP // Not specified if length = 0
 | ||||||
|  | 	End   net.IP // Not specified if length = 0
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns a string representation of an IPRange.
 | ||||||
|  | func (ipr *IPRange) String() string { | ||||||
|  | 	if len(ipr.Start) == 0 { | ||||||
|  | 		return "" | ||||||
|  | 	} | ||||||
|  | 	start := ipr.Start.String() | ||||||
|  | 	if len(ipr.End) == 0 { | ||||||
|  | 		return start | ||||||
|  | 	} | ||||||
|  | 	return start + "-" + ipr.End.String() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newSASQueryParameters creates and initializes a SASQueryParameters object based on the
 | ||||||
|  | // query parameter map's passed-in values. If deleteSASParametersFromValues is true,
 | ||||||
|  | // all SAS-related query parameters are removed from the passed-in map. If
 | ||||||
|  | // deleteSASParametersFromValues is false, the passed-in map is unaltered.
 | ||||||
|  | func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { | ||||||
|  | 	p := SASQueryParameters{} | ||||||
|  | 	for k, v := range values { | ||||||
|  | 		val := v[0] | ||||||
|  | 		isSASKey := true | ||||||
|  | 		switch strings.ToLower(k) { | ||||||
|  | 		case "sv": | ||||||
|  | 			p.version = val | ||||||
|  | 		case "ss": | ||||||
|  | 			p.services = val | ||||||
|  | 		case "srt": | ||||||
|  | 			p.resourceTypes = val | ||||||
|  | 		case "spr": | ||||||
|  | 			p.protocol = SASProtocol(val) | ||||||
|  | 		case "snapshot": | ||||||
|  | 			p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) | ||||||
|  | 		case "st": | ||||||
|  | 			p.startTime, _ = time.Parse(SASTimeFormat, val) | ||||||
|  | 		case "se": | ||||||
|  | 			p.expiryTime, _ = time.Parse(SASTimeFormat, val) | ||||||
|  | 		case "sip": | ||||||
|  | 			dashIndex := strings.Index(val, "-") | ||||||
|  | 			if dashIndex == -1 { | ||||||
|  | 				p.ipRange.Start = net.ParseIP(val) | ||||||
|  | 			} else { | ||||||
|  | 				p.ipRange.Start = net.ParseIP(val[:dashIndex]) | ||||||
|  | 				p.ipRange.End = net.ParseIP(val[dashIndex+1:]) | ||||||
|  | 			} | ||||||
|  | 		case "si": | ||||||
|  | 			p.identifier = val | ||||||
|  | 		case "sr": | ||||||
|  | 			p.resource = val | ||||||
|  | 		case "sp": | ||||||
|  | 			p.permissions = val | ||||||
|  | 		case "sig": | ||||||
|  | 			p.signature = val | ||||||
|  | 		case "rscc": | ||||||
|  | 			p.cacheControl = val | ||||||
|  | 		case "rscd": | ||||||
|  | 			p.contentDisposition = val | ||||||
|  | 		case "rsce": | ||||||
|  | 			p.contentEncoding = val | ||||||
|  | 		case "rscl": | ||||||
|  | 			p.contentLanguage = val | ||||||
|  | 		case "rsct": | ||||||
|  | 			p.contentType = val | ||||||
|  | 		case "skoid": | ||||||
|  | 			p.signedOid = val | ||||||
|  | 		case "sktid": | ||||||
|  | 			p.signedTid = val | ||||||
|  | 		case "skt": | ||||||
|  | 			p.signedStart, _ = time.Parse(SASTimeFormat, val) | ||||||
|  | 		case "ske": | ||||||
|  | 			p.signedExpiry, _ = time.Parse(SASTimeFormat, val) | ||||||
|  | 		case "sks": | ||||||
|  | 			p.signedService = val | ||||||
|  | 		case "skv": | ||||||
|  | 			p.signedVersion = val | ||||||
|  | 		default: | ||||||
|  | 			isSASKey = false // We didn't recognize the query parameter
 | ||||||
|  | 		} | ||||||
|  | 		if isSASKey && deleteSASParametersFromValues { | ||||||
|  | 			delete(values, k) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return p | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // addToValues adds the SAS components to the specified query parameters map.
 | ||||||
|  | func (p *SASQueryParameters) addToValues(v url.Values) url.Values { | ||||||
|  | 	if p.version != "" { | ||||||
|  | 		v.Add("sv", p.version) | ||||||
|  | 	} | ||||||
|  | 	if p.services != "" { | ||||||
|  | 		v.Add("ss", p.services) | ||||||
|  | 	} | ||||||
|  | 	if p.resourceTypes != "" { | ||||||
|  | 		v.Add("srt", p.resourceTypes) | ||||||
|  | 	} | ||||||
|  | 	if p.protocol != "" { | ||||||
|  | 		v.Add("spr", string(p.protocol)) | ||||||
|  | 	} | ||||||
|  | 	if !p.startTime.IsZero() { | ||||||
|  | 		v.Add("st", p.startTime.Format(SASTimeFormat)) | ||||||
|  | 	} | ||||||
|  | 	if !p.expiryTime.IsZero() { | ||||||
|  | 		v.Add("se", p.expiryTime.Format(SASTimeFormat)) | ||||||
|  | 	} | ||||||
|  | 	if len(p.ipRange.Start) > 0 { | ||||||
|  | 		v.Add("sip", p.ipRange.String()) | ||||||
|  | 	} | ||||||
|  | 	if p.identifier != "" { | ||||||
|  | 		v.Add("si", p.identifier) | ||||||
|  | 	} | ||||||
|  | 	if p.resource != "" { | ||||||
|  | 		v.Add("sr", p.resource) | ||||||
|  | 	} | ||||||
|  | 	if p.permissions != "" { | ||||||
|  | 		v.Add("sp", p.permissions) | ||||||
|  | 	} | ||||||
|  | 	if p.signedOid != "" { | ||||||
|  | 		v.Add("skoid", p.signedOid) | ||||||
|  | 		v.Add("sktid", p.signedTid) | ||||||
|  | 		v.Add("skt", p.signedStart.Format(SASTimeFormat)) | ||||||
|  | 		v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) | ||||||
|  | 		v.Add("sks", p.signedService) | ||||||
|  | 		v.Add("skv", p.signedVersion) | ||||||
|  | 	} | ||||||
|  | 	if p.signature != "" { | ||||||
|  | 		v.Add("sig", p.signature) | ||||||
|  | 	} | ||||||
|  | 	if p.cacheControl != "" { | ||||||
|  | 		v.Add("rscc", p.cacheControl) | ||||||
|  | 	} | ||||||
|  | 	if p.contentDisposition != "" { | ||||||
|  | 		v.Add("rscd", p.contentDisposition) | ||||||
|  | 	} | ||||||
|  | 	if p.contentEncoding != "" { | ||||||
|  | 		v.Add("rsce", p.contentEncoding) | ||||||
|  | 	} | ||||||
|  | 	if p.contentLanguage != "" { | ||||||
|  | 		v.Add("rscl", p.contentLanguage) | ||||||
|  | 	} | ||||||
|  | 	if p.contentType != "" { | ||||||
|  | 		v.Add("rsct", p.contentType) | ||||||
|  | 	} | ||||||
|  | 	return v | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Encode encodes the SAS query parameters into URL encoded form sorted by key.
 | ||||||
|  | func (p *SASQueryParameters) Encode() string { | ||||||
|  | 	v := url.Values{} | ||||||
|  | 	p.addToValues(v) | ||||||
|  | 	return v.Encode() | ||||||
|  | } | ||||||
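
Going the other way, these parameters are typically parsed out of an existing URL. A sketch assuming azblob.NewBlobURLParts and its SAS field, which are not shown in this diff:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// NewBlobURLParts (assumed API) uses newSASQueryParameters above to strip
	// and parse the SAS fields out of the query string.
	u, err := url.Parse("https://myaccount.blob.core.windows.net/c/b?sv=2018-03-28&sp=r&sig=abc")
	if err != nil {
		log.Fatal(err)
	}
	parts := azblob.NewBlobURLParts(*u)
	fmt.Println(parts.SAS.Version(), parts.SAS.Permissions()) // "2018-03-28 r"
	fmt.Println(parts.SAS.Encode())                           // re-encoded, sorted by key
}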
							
								
								
									
131 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go generated vendored Normal file
|  | @ -0,0 +1,131 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes
 | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code.
 | ||||||
|  | 	ServiceCodeNone ServiceCodeType = "" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeAccountAlreadyExists means the specified account already exists.
 | ||||||
|  | 	ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403).
 | ||||||
|  | 	ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeAccountIsDisabled means the specified account is disabled (403).
 | ||||||
|  | 	ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403).
 | ||||||
|  | 	ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400).
 | ||||||
|  | 	ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412).
 | ||||||
|  | 	ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400).
 | ||||||
|  | 	ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInsufficientAccountPermissions means read operations are currently disabled, or write operations are not allowed, or the account being accessed does not have sufficient permissions to execute this operation (403).
 | ||||||
|  | 	ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500).
 | ||||||
|  | 	ServiceCodeInternalError ServiceCodeType = "InternalError" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400).
 | ||||||
|  | 	ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400).
 | ||||||
|  | 	ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400).
 | ||||||
|  | 	ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidInput means one of the request inputs is not valid (400).
 | ||||||
|  | 	ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400).
 | ||||||
|  | 	ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400).
 | ||||||
|  | 	ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400).
 | ||||||
|  | 	ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416).
 | ||||||
|  | 	ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400).
 | ||||||
|  | 	ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400).
 | ||||||
|  | 	ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400).
 | ||||||
|  | 	ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400).
 | ||||||
|  | 	ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400).
 | ||||||
|  | 	ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400).
 | ||||||
|  | 	ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411).
 | ||||||
|  | 	ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400).
 | ||||||
|  | 	ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400).
 | ||||||
|  | 	ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400).
 | ||||||
|  | 	ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400).
 | ||||||
|  | 	ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500).
 | ||||||
|  | 	ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400).
 | ||||||
|  | 	ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400).
 | ||||||
|  | 	ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413).
 | ||||||
|  | 	ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409).
 | ||||||
|  | 	ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400).
 | ||||||
|  | 	ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeResourceAlreadyExists means the specified resource already exists (409).
 | ||||||
|  | 	ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeResourceNotFound means the specified resource does not exist (404).
 | ||||||
|  | 	ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeServerBusy means the server is currently unable to receive requests, ingress/egress is over the account limit, or operations per second is over the account limit; retry the request (503).
 | ||||||
|  | 	ServiceCodeServerBusy ServiceCodeType = "ServerBusy" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400).
 | ||||||
|  | 	ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400).
 | ||||||
|  | 	ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400).
 | ||||||
|  | 	ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" | ||||||
|  | 
 | ||||||
|  | 	// ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405).
 | ||||||
|  | 	ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" | ||||||
|  | ) | ||||||
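
The constants above pair with the StorageError interface introduced in the next file: callers branch on ServiceCode() rather than parsing response bodies. A minimal sketch of that pattern, assuming a containerURL built elsewhere and the ServiceCodeContainerAlreadyExists constant from a sibling file of the same package:

package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// ensureContainer treats "already exists" as success and propagates
// everything else, branching on the service-reported error code.
func ensureContainer(ctx context.Context, containerURL azblob.ContainerURL) error {
	_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
	if stgErr, ok := err.(azblob.StorageError); ok &&
		stgErr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists {
		return nil // benign race: another writer created the container first
	}
	return err // nil on success, the original error otherwise
}

If the container was created concurrently the service reports ContainerAlreadyExists and the helper treats it as success; any other error propagates unchanged.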
							
								
								
									
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go (111 lines, generated, vendored, Normal file)
							|  | @ -0,0 +1,111 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"encoding/xml" | ||||||
|  | 	"fmt" | ||||||
|  | 	"net/http" | ||||||
|  | 	"sort" | ||||||
|  | 
 | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func init() { | ||||||
|  | 	// wire up our custom error handling constructor
 | ||||||
|  | 	responseErrorFactory = newStorageError | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ServiceCodeType is a string identifying a storage service error.
 | ||||||
|  | // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
 | ||||||
|  | type ServiceCodeType string | ||||||
|  | 
 | ||||||
|  | // StorageError identifies a responder-generated network or response parsing error.
 | ||||||
|  | type StorageError interface { | ||||||
|  | 	// ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response().
 | ||||||
|  | 	ResponseError | ||||||
|  | 
 | ||||||
|  | 	// ServiceCode returns a service error code. Your code can use this to make error recovery decisions.
 | ||||||
|  | 	ServiceCode() ServiceCodeType | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // storageError is the internal struct that implements the public StorageError interface.
 | ||||||
|  | type storageError struct { | ||||||
|  | 	responseError | ||||||
|  | 	serviceCode ServiceCodeType | ||||||
|  | 	details     map[string]string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newStorageError creates an error object that implements the error interface.
 | ||||||
|  | func newStorageError(cause error, response *http.Response, description string) error { | ||||||
|  | 	return &storageError{ | ||||||
|  | 		responseError: responseError{ | ||||||
|  | 			ErrorNode:   pipeline.ErrorNode{}.Initialize(cause, 3), | ||||||
|  | 			response:    response, | ||||||
|  | 			description: description, | ||||||
|  | 		}, | ||||||
|  | 		serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ServiceCode returns the service error code. The caller may examine it but should not modify it.
 | ||||||
|  | func (e *storageError) ServiceCode() ServiceCodeType { | ||||||
|  | 	return e.serviceCode | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error implements the error interface's Error method to return a string representation of the error.
 | ||||||
|  | func (e *storageError) Error() string { | ||||||
|  | 	b := &bytes.Buffer{} | ||||||
|  | 	fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) | ||||||
|  | 	fmt.Fprintf(b, "Description=%s, Details: ", e.description) | ||||||
|  | 	if len(e.details) == 0 { | ||||||
|  | 		b.WriteString("(none)\n") | ||||||
|  | 	} else { | ||||||
|  | 		b.WriteRune('\n') | ||||||
|  | 		keys := make([]string, 0, len(e.details)) | ||||||
|  | 		// Alphabetize the details
 | ||||||
|  | 		for k := range e.details { | ||||||
|  | 			keys = append(keys, k) | ||||||
|  | 		} | ||||||
|  | 		sort.Strings(keys) | ||||||
|  | 		for _, k := range keys { | ||||||
|  | 			fmt.Fprintf(b, "   %s: %+v\n", k, e.details[k]) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request
 | ||||||
|  | 	pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) | ||||||
|  | 	return e.ErrorNode.Error(b.String()) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503).
 | ||||||
|  | func (e *storageError) Temporary() bool { | ||||||
|  | 	if e.response != nil { | ||||||
|  | 		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { | ||||||
|  | 			return true | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return e.ErrorNode.Temporary() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
 | ||||||
|  | func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { | ||||||
|  | 	tokName := "" | ||||||
|  | 	var t xml.Token | ||||||
|  | 	for t, err = d.Token(); err == nil; t, err = d.Token() { | ||||||
|  | 		switch tt := t.(type) { | ||||||
|  | 		case xml.StartElement: | ||||||
|  | 			tokName = tt.Name.Local | ||||||
|  | 		case xml.CharData: | ||||||
|  | 			switch tokName { | ||||||
|  | 			case "Message": | ||||||
|  | 				e.description = string(tt) | ||||||
|  | 			default: | ||||||
|  | 				if e.details == nil { | ||||||
|  | 					e.details = map[string]string{} | ||||||
|  | 				} | ||||||
|  | 				e.details[tokName] = string(tt) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
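
Temporary() above folds HTTP 500 and 503 into net.Error's temporary-failure semantics, so a caller can drive a generic retry loop without inspecting status codes. A minimal sketch, with illustrative (not package-provided) attempt counts and backoff:

package example

import (
	"context"
	"net"
	"time"
)

// doWithRetry retries op while it fails with a temporary error; per
// storageError.Temporary above, HTTP 500 and 503 responses qualify.
func doWithRetry(ctx context.Context, attempts int, op func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(ctx); err == nil {
			return nil
		}
		if nErr, ok := err.(net.Error); !ok || !nErr.Temporary() {
			return err // permanent failure: no point retrying
		}
		time.Sleep(time.Duration(i+1) * 500 * time.Millisecond) // crude linear backoff
	}
	return err
}

In practice the pipeline's NewRetryPolicyFactory (described in the package documentation later in this commit) already retries for you; the sketch only shows what Temporary() makes possible for callers.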
							
								
								
									
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go (64 lines, generated, vendored, Normal file)
							|  | @ -0,0 +1,64 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"strconv" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // httpRange defines a range of bytes within an HTTP resource, starting at offset and
 | ||||||
|  | // ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
 | ||||||
|  | // which has an offset but a zero-value count indicates from the offset to the resource's end.
 | ||||||
|  | type httpRange struct { | ||||||
|  | 	offset int64 | ||||||
|  | 	count  int64 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r httpRange) pointers() *string { | ||||||
|  | 	if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
 | ||||||
|  | 		return nil // No specified range
 | ||||||
|  | 	} | ||||||
|  | 	endOffset := "" // if count == CountToEnd (0)
 | ||||||
|  | 	if r.count > 0 { | ||||||
|  | 		endOffset = strconv.FormatInt((r.offset+r.count)-1, 10) | ||||||
|  | 	} | ||||||
|  | 	dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset) | ||||||
|  | 	return &dataRange | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { | ||||||
|  | 	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
 | ||||||
|  | 		return 0, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	err := validateSeekableStreamAt0(body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return 0, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	count, err := body.Seek(0, io.SeekEnd) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return 0, errors.New("body stream must be seekable") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	body.Seek(0, io.SeekStart) | ||||||
|  | 	return count, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // validateSeekableStreamAt0 returns an error if body is not a valid seekable stream positioned at 0
 | ||||||
|  | func validateSeekableStreamAt0(body io.ReadSeeker) error { | ||||||
|  | 	if body == nil { // nil bodies are "logically" seekable to 0
 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { | ||||||
|  | 		// Help detect programmer error
 | ||||||
|  | 		if err != nil { | ||||||
|  | 			return errors.New("body stream must be seekable") | ||||||
|  | 		} | ||||||
|  | 		return errors.New("body stream must be set to position 0") | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
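
To make the two helpers above concrete: pointers() renders an httpRange as an HTTP Range header value (nil means no header at all), and validateSeekableStreamAt0AndGetCount measures a stream's length and rewinds it. Both are unexported, so this illustrative snippet would only compile inside package azblob (e.g. in a _test.go file):

package azblob

import (
	"bytes"
	"fmt"
)

// exampleRangeAndValidation exercises the unexported helpers above.
func exampleRangeAndValidation() {
	// Zero-value range: the whole resource, so no Range header is emitted.
	fmt.Println(httpRange{}.pointers() == nil) // true

	r := httpRange{offset: 512} // count 0 (CountToEnd): open-ended range
	fmt.Println(*r.pointers())  // "bytes=512-"

	r = httpRange{offset: 0, count: 1024} // first kilobyte
	fmt.Println(*r.pointers())            // "bytes=0-1023"

	// A fresh bytes.Reader is seekable and positioned at 0, so it validates;
	// the helper reports its length, then rewinds it to the start.
	n, err := validateSeekableStreamAt0AndGetCount(bytes.NewReader(make([]byte, 42)))
	fmt.Println(n, err) // 42 <nil>
}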
|  | @ -0,0 +1,77 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"crypto/rand" | ||||||
|  | 	"fmt" | ||||||
|  | 	"strconv" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // The UUID reserved variants.
 | ||||||
|  | const ( | ||||||
|  | 	reservedNCS       byte = 0x80 | ||||||
|  | 	reservedRFC4122   byte = 0x40 | ||||||
|  | 	reservedMicrosoft byte = 0x20 | ||||||
|  | 	reservedFuture    byte = 0x00 | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // uuid is a UUID representation compliant with the specification in RFC 4122.
 | ||||||
|  | type uuid [16]byte | ||||||
|  | 
 | ||||||
|  | // newUUID returns a new uuid using the RFC 4122 version 4 algorithm.
 | ||||||
|  | func newUUID() (u uuid) { | ||||||
|  | 	u = uuid{} | ||||||
|  | 	// Set all bits to randomly (or pseudo-randomly) chosen values.
 | ||||||
|  | 	rand.Read(u[:]) | ||||||
|  | 	u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
 | ||||||
|  | 
 | ||||||
|  | 	var version byte = 4 | ||||||
|  | 	u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
 | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns an unparsed version of the generated UUID sequence.
 | ||||||
|  | func (u uuid) String() string { | ||||||
|  | 	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
 | ||||||
|  | // or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
 | ||||||
|  | func parseUUID(uuidStr string) uuid { | ||||||
|  | 	char := func(hexString string) byte { | ||||||
|  | 		i, _ := strconv.ParseUint(hexString, 16, 8) | ||||||
|  | 		return byte(i) | ||||||
|  | 	} | ||||||
|  | 	if uuidStr[0] == '{' { | ||||||
|  | 		uuidStr = uuidStr[1:] // Skip over the '{'
 | ||||||
|  | 	} | ||||||
|  | 	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
 | ||||||
|  | 	//             1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
 | ||||||
|  | 	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
 | ||||||
|  | 	uuidVal := uuid{ | ||||||
|  | 		char(uuidStr[0:2]), | ||||||
|  | 		char(uuidStr[2:4]), | ||||||
|  | 		char(uuidStr[4:6]), | ||||||
|  | 		char(uuidStr[6:8]), | ||||||
|  | 
 | ||||||
|  | 		char(uuidStr[9:11]), | ||||||
|  | 		char(uuidStr[11:13]), | ||||||
|  | 
 | ||||||
|  | 		char(uuidStr[14:16]), | ||||||
|  | 		char(uuidStr[16:18]), | ||||||
|  | 
 | ||||||
|  | 		char(uuidStr[19:21]), | ||||||
|  | 		char(uuidStr[21:23]), | ||||||
|  | 
 | ||||||
|  | 		char(uuidStr[24:26]), | ||||||
|  | 		char(uuidStr[26:28]), | ||||||
|  | 		char(uuidStr[28:30]), | ||||||
|  | 		char(uuidStr[30:32]), | ||||||
|  | 		char(uuidStr[32:34]), | ||||||
|  | 		char(uuidStr[34:36]), | ||||||
|  | 	} | ||||||
|  | 	return uuidVal | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (u uuid) bytes() []byte { | ||||||
|  | 	return u[:] | ||||||
|  | } | ||||||
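
A small round-trip sketch of the helpers above. They are unexported (they back values such as the x-ms-client-request-id header added by NewUniqueRequestIDPolicyFactory, described in the package documentation below), so this too would only compile inside package azblob:

package azblob

import "fmt"

// exampleUUIDRoundTrip shows that String and parseUUID are inverses.
func exampleUUIDRoundTrip() {
	u := newUUID()
	s := u.String()                // e.g. "7f9c2ba4-e88f-4xxx-...": the 13th hex digit is always 4
	fmt.Println(parseUUID(s) == u) // true: the string form round-trips byte for byte
	fmt.Println(len(u.bytes()))    // 16
}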
|  | @ -0,0 +1,89 @@ | ||||||
|  | // Copyright 2017 Microsoft Corporation. All rights reserved.
 | ||||||
|  | // Use of this source code is governed by an MIT
 | ||||||
|  | // license that can be found in the LICENSE file.
 | ||||||
|  | 
 | ||||||
|  | /* | ||||||
|  | Package azblob allows you to manipulate Azure Storage container and blob objects. | ||||||
|  | 
 | ||||||
|  | URL Types | ||||||
|  | 
 | ||||||
|  | The most common types you'll work with are the XxxURL types. The methods of these types make requests | ||||||
|  | against the Azure Storage Service. | ||||||
|  | 
 | ||||||
|  |  - ServiceURL's          methods perform operations on a storage account. | ||||||
|  |     - ContainerURL's     methods perform operations on an account's container. | ||||||
|  |        - BlockBlobURL's  methods perform operations on a container's block blob. | ||||||
|  |        - AppendBlobURL's methods perform operations on a container's append blob. | ||||||
|  |        - PageBlobURL's   methods perform operations on a container's page blob. | ||||||
|  |        - BlobURL's       methods perform operations on a container's blob regardless of the blob's type. | ||||||
|  | 
 | ||||||
|  | Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP | ||||||
|  | request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response is processed. | ||||||
|  | The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more. | ||||||
|  | 
 | ||||||
|  | Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass | ||||||
|  | an initial pipeline. When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own | ||||||
|  | URL but it shares the same pipeline as the parent ServiceURL object. | ||||||
|  | 
 | ||||||
|  | To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob. | ||||||
|  | To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL | ||||||
|  | respectively. These three types are all identical except for the methods they expose; each type exposes the methods | ||||||
|  | relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL; | ||||||
|  | this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL, | ||||||
|  | the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You | ||||||
|  | can easily switch between blob types (method sets) by calling a ToXxxBlobURL method. | ||||||
|  | 
 | ||||||
|  | If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL | ||||||
|  | object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object | ||||||
|  | with the same URL as the original but with the specified pipeline. | ||||||
|  | 
 | ||||||
|  | Note that XxxURL objects use little memory, are goroutine-safe, and many objects share the same pipeline. This means that | ||||||
|  | XxxURL objects share a lot of system resources, making them very efficient. | ||||||
|  | 
 | ||||||
|  | All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures, | ||||||
|  | transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an | ||||||
|  | example of how to deal with errors. | ||||||
|  | 
 | ||||||
|  | URL and Shared Access Signature Manipulation | ||||||
|  | 
 | ||||||
|  | The library includes a BlobURLParts type for deconstructing and reconstructing URLs, and you can use the following types | ||||||
|  | for generating and parsing Shared Access Signatures (SAS): | ||||||
|  |  - Use the AccountSASSignatureValues type to create a SAS for a storage account. | ||||||
|  |  - Use the BlobSASSignatureValues type to create a SAS for a container or blob. | ||||||
|  |  - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters. | ||||||
|  | 
 | ||||||
|  | To generate a SAS, you must use the SharedKeyCredential type. | ||||||
|  | 
 | ||||||
|  | Credentials | ||||||
|  | 
 | ||||||
|  | When creating a request pipeline, you must specify one of this package's credential types. | ||||||
|  |  - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS). | ||||||
|  |  - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this | ||||||
|  |    to generate Shared Access Signatures. | ||||||
|  | 
 | ||||||
|  | HTTP Request Policy Factories | ||||||
|  | 
 | ||||||
|  | This package defines several request policy factories for use with the pipeline package. | ||||||
|  | Most applications will not use these factories directly; instead, the NewPipeline | ||||||
|  | function creates these factories, initializes them (via the PipelineOptions type) | ||||||
|  | and returns a pipeline object for use by the XxxURL objects. | ||||||
|  | 
 | ||||||
|  | However, for advanced scenarios, developers can access these policy factories directly | ||||||
|  | and even create their own and then construct their own pipeline in order to affect HTTP | ||||||
|  | requests and responses performed by the XxxURL objects. For example, developers can | ||||||
|  | introduce their own logging, random failures, request recording & playback for fast | ||||||
|  | testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The | ||||||
|  | possibilities are endless! | ||||||
|  | 
 | ||||||
|  | Below are the request pipeline policy factory functions that are provided with this | ||||||
|  | package: | ||||||
|  |  - NewRetryPolicyFactory           Enables rich retry semantics for failed HTTP requests. | ||||||
|  |  - NewRequestLogPolicyFactory      Enables rich logging support for HTTP requests/responses & failures. | ||||||
|  |  - NewTelemetryPolicyFactory       Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests. | ||||||
|  |  - NewUniqueRequestIDPolicyFactory Adds a x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures. | ||||||
|  | 
 | ||||||
|  | Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline. | ||||||
|  | */ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // 	TokenCredential     Use this to access resources using Role-Based Access Control (RBAC).
 | ||||||
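
The doc comment above maps onto a short getting-started flow. A hedged sketch of that flow against the v0.8 API; the account name, key, container, and blob names are placeholders:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Placeholder account name and base64-encoded key; real code loads these from config.
	credential, err := azblob.NewSharedKeyCredential("myaccount", "bXlrZXk=")
	if err != nil {
		log.Fatal(err)
	}

	// One pipeline, shared by every XxxURL derived from serviceURL below.
	p := azblob.NewPipeline(credential, azblob.PipelineOptions{})

	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
	serviceURL := azblob.NewServiceURL(*u, p)

	// Walk the hierarchy the doc comment describes: account -> container -> blob.
	containerURL := serviceURL.NewContainerURL("mycontainer")
	blobURL := containerURL.NewBlockBlobURL("hello.txt")
	fmt.Println(blobURL) // full blob endpoint; methods on it issue HTTP requests
}

Note how containerURL and blobURL reuse serviceURL's pipeline, exactly as the pipeline-sharing paragraph above describes.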
							
								
								
									
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go (349 lines, generated, vendored, Normal file)
							|  | @ -0,0 +1,349 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // appendBlobClient is the client for the AppendBlob methods of the Azblob service.
 | ||||||
|  | type appendBlobClient struct { | ||||||
|  | 	managementClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newAppendBlobClient creates an instance of the appendBlobClient client.
 | ||||||
|  | func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient { | ||||||
|  | 	return appendBlobClient{newManagementClient(url, p)} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
 | ||||||
|  | // Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
 | ||||||
|  | // supported only on version 2015-02-21 version or later.
 | ||||||
|  | //
 | ||||||
|  | // body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
 | ||||||
|  | // error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
 | ||||||
|  | // information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> transactionalContentMD5 is specify the transactional md5 for the body, to
 | ||||||
|  | // be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active
 | ||||||
|  | // and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob.
 | ||||||
|  | // If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than
 | ||||||
|  | // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code
 | ||||||
|  | // 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation.
 | ||||||
|  | // A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to
 | ||||||
|  | // this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
 | ||||||
|  | // - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been
 | ||||||
|  | // modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
 | ||||||
|  | // it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
 | ||||||
|  | // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
 | ||||||
|  | // logs when storage analytics logging is enabled.
 | ||||||
|  | func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: body, | ||||||
|  | 			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*AppendBlobAppendBlockResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendBlockPreparer prepares the AppendBlock request.
 | ||||||
|  | func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "appendblock") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if transactionalContentMD5 != nil { | ||||||
|  | 		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if maxSize != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) | ||||||
|  | 	} | ||||||
|  | 	if appendPosition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendBlockResponder handles the response to the AppendBlock request.
 | ||||||
|  | func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob
 | ||||||
|  | // where the contents are read from a source url. The Append Block operation is permitted only if the blob was created
 | ||||||
|  | // with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
 | ||||||
|  | //
 | ||||||
|  | // sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of
 | ||||||
|  | // source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must
 | ||||||
|  | // be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for
 | ||||||
|  | // the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is
 | ||||||
|  | // already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error
 | ||||||
|  | // (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the
 | ||||||
|  | // Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append
 | ||||||
|  | // position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error
 | ||||||
|  | // (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob
 | ||||||
|  | // if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate
 | ||||||
|  | // only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to
 | ||||||
|  | // operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
 | ||||||
|  | // matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified
 | ||||||
|  | // since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it
 | ||||||
|  | // has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value.
 | ||||||
|  | // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
 | ||||||
|  | // logs when storage analytics logging is enabled.
 | ||||||
|  | func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*AppendBlobAppendBlockFromURLResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendBlockFromURLPreparer prepares the AppendBlockFromURL request.
 | ||||||
|  | func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "appendblock") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-copy-source", sourceURL) | ||||||
|  | 	if sourceRange != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-range", *sourceRange) | ||||||
|  | 	} | ||||||
|  | 	if sourceContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if maxSize != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) | ||||||
|  | 	} | ||||||
|  | 	if appendPosition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfModifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfNoneMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // appendBlockFromURLResponder handles the response to the AppendBlockFromURL request.
 | ||||||
|  | func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Create the Create Append Blob operation creates a new append blob.
 | ||||||
|  | //
 | ||||||
|  | // contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
 | ||||||
|  | // information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
 | ||||||
|  | // this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
 | ||||||
|  | // blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
 | ||||||
|  | // blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
 | ||||||
|  | // blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
 | ||||||
|  | // hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
 | ||||||
|  | // blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
 | ||||||
|  | // returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
 | ||||||
|  | // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
 | ||||||
|  | // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
 | ||||||
|  | // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
 | ||||||
|  | // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
 | ||||||
|  | // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
 | ||||||
|  | // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
 | ||||||
|  | // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
 | ||||||
|  | // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
 | ||||||
|  | // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
 | ||||||
|  | // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*AppendBlobCreateResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // createPreparer prepares the Create request.
 | ||||||
|  | func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if blobContentType != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-type", *blobContentType) | ||||||
|  | 	} | ||||||
|  | 	if blobContentEncoding != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) | ||||||
|  | 	} | ||||||
|  | 	if blobContentLanguage != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) | ||||||
|  | 	} | ||||||
|  | 	if blobContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if blobCacheControl != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) | ||||||
|  | 	} | ||||||
|  | 	if metadata != nil { | ||||||
|  | 		for k, v := range metadata { | ||||||
|  | 			req.Header.Set("x-ms-meta-"+k, v) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if blobContentDisposition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-blob-type", "AppendBlob") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // createResponder handles the response to the Create request.
 | ||||||
|  | func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
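
Applications do not call this low-level client directly; they go through the exported AppendBlobURL wrapper. A hedged sketch of typical usage (wrapper signatures as they appear in azblob v0.8; appendBlobURL construction via containerURL.NewAppendBlobURL is assumed):

package example

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// appendLine creates (or recreates) an append blob and appends one block.
func appendLine(ctx context.Context, appendBlobURL azblob.AppendBlobURL, line string) {
	// Create sets x-ms-blob-type: AppendBlob, as the preparer above shows.
	if _, err := appendBlobURL.Create(ctx, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}); err != nil {
		log.Fatal(err)
	}
	// The body must be an io.ReadSeeker positioned at 0 (see zc_util_validate.go above).
	if _, err := appendBlobURL.AppendBlock(ctx, strings.NewReader(line), azblob.AppendBlobAccessConditions{}, nil); err != nil {
		log.Fatal(err)
	}
}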
							
								
								
									
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go (1365 lines, generated, vendored, Normal file; diff suppressed because it is too large)
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go (510 lines, generated, vendored, Normal file)
							|  | @ -0,0 +1,510 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"encoding/xml" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // blockBlobClient is the client for the BlockBlob methods of the Azblob service.
 | ||||||
|  | type blockBlobClient struct { | ||||||
|  | 	managementClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newBlockBlobClient creates an instance of the blockBlobClient client.
 | ||||||
|  | func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { | ||||||
|  | 	return blockBlobClient{newManagementClient(url, p)} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the
 | ||||||
|  | // blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior
 | ||||||
|  | // Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed,
 | ||||||
|  | // then committing the new and existing blocks together. You can do this by specifying whether to commit a block from
 | ||||||
|  | // the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the
 | ||||||
|  | // block, whichever list it may belong to.
 | ||||||
|  | //
 | ||||||
|  | // timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> blobCacheControl is optional. Sets the blob's cache control. If specified,
 | ||||||
|  | // this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's
 | ||||||
|  | // content type. If specified, this property is stored with the blob and returned with a read request.
 | ||||||
|  | // blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the
 | ||||||
|  | // blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If
 | ||||||
|  | // specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An
 | ||||||
|  | // MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were
 | ||||||
|  | // validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the
 | ||||||
|  | // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
 | ||||||
|  | // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
 | ||||||
|  | // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
 | ||||||
|  | // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
 | ||||||
|  | // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
 | ||||||
|  | // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
 | ||||||
|  | // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
 | ||||||
|  | // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
 | ||||||
|  | // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
 | ||||||
|  | // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*BlockBlobCommitBlockListResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // commitBlockListPreparer prepares the CommitBlockList request.
 | ||||||
|  | func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "blocklist") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if blobCacheControl != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) | ||||||
|  | 	} | ||||||
|  | 	if blobContentType != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-type", *blobContentType) | ||||||
|  | 	} | ||||||
|  | 	if blobContentEncoding != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) | ||||||
|  | 	} | ||||||
|  | 	if blobContentLanguage != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) | ||||||
|  | 	} | ||||||
|  | 	if blobContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if metadata != nil { | ||||||
|  | 		for k, v := range metadata { | ||||||
|  | 			req.Header.Set("x-ms-meta-"+k, v) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if blobContentDisposition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	b, err := xml.Marshal(blocks) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to marshal request body") | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("Content-Type", "application/xml") | ||||||
|  | 	err = req.SetBody(bytes.NewReader(b)) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to set request body") | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // commitBlockListResponder handles the response to the CommitBlockList request.
 | ||||||
|  | func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
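For reference, the XML body that commitBlockListPreparer marshals has the Put Block List wire shape. Below is a minimal, self-contained sketch of that shape using only the standard library; the blockList struct is a hypothetical stand-in for the BlockLookupList model, whose real definition lives in the suppressed zz_generated_models.go.

package main

import (
	"encoding/xml"
	"fmt"
)

// blockList is a hypothetical mirror of the BlockLookupList model that
// commitBlockListPreparer serializes; only the <Latest> variant is shown.
type blockList struct {
	XMLName xml.Name `xml:"BlockList"`
	Latest  []string `xml:"Latest"`
}

func main() {
	// Block IDs are base64 strings; all IDs in one blob must have the same
	// pre-encoding length.
	b, err := xml.Marshal(blockList{Latest: []string{"AAAAAA==", "AAAAAQ=="}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Output: <BlockList><Latest>AAAAAA==</Latest><Latest>AAAAAQ==</Latest></BlockList>
}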
|  | 
 | ||||||
|  | // GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block
 | ||||||
|  | // blob
 | ||||||
|  | //
 | ||||||
|  | // listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists
 | ||||||
|  | // together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob
 | ||||||
|  | // snapshot to retrieve. For more information on working with blob snapshots, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
 | ||||||
|  | // a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
 | ||||||
|  | // limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*BlockList), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getBlockListPreparer prepares the GetBlockList request.
 | ||||||
|  | func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if snapshot != nil && len(*snapshot) > 0 { | ||||||
|  | 		params.Set("snapshot", *snapshot) | ||||||
|  | 	} | ||||||
|  | 	params.Set("blocklisttype", string(listType)) | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "blocklist") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getBlockListResponder handles the response to the GetBlockList request.
 | ||||||
|  | func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &BlockList{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
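As a rough illustration of the request that getBlockListPreparer assembles, here is the same URL built with only net/http; the account, container, and blob names are placeholders, and authentication (for example a SAS token) is omitted.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder blob URL; a real request also needs authentication.
	req, err := http.NewRequest("GET",
		"https://myaccount.blob.core.windows.net/mycontainer/myblob", nil)
	if err != nil {
		panic(err)
	}
	q := req.URL.Query()
	q.Set("comp", "blocklist")
	q.Set("blocklisttype", "all") // "committed", "uncommitted", or "all"
	req.URL.RawQuery = q.Encode()
	req.Header.Set("x-ms-version", "2018-11-09")
	fmt.Println(req.URL.String())
}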
|  | 
 | ||||||
|  | // StageBlock the Stage Block operation creates a new block to be committed as part of a blob
 | ||||||
|  | //
 | ||||||
|  | // blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
 | ||||||
|  | // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
 | ||||||
|  | // same size for each block. contentLength is the length of the request. body is initial data; the body will be closed
 | ||||||
|  | // upon successful return. Callers should ensure closure when receiving an error. transactionalContentMD5 is specify the
 | ||||||
|  | // transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in
 | ||||||
|  | // seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character
 | ||||||
|  | // limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: body, | ||||||
|  | 			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*BlockBlobStageBlockResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // stageBlockPreparer prepares the StageBlock request.
 | ||||||
|  | func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	params.Set("blockid", blockID) | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "block") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if transactionalContentMD5 != nil { | ||||||
|  | 		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // stageBlockResponder handles the response to the StageBlock request.
 | ||||||
|  | func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
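The block-ID rules in the comment above (base64, at most 64 bytes before encoding, the same length for every block in a blob) are commonly satisfied by encoding a fixed-width counter. A small sketch, not part of this package:

package main

import (
	"encoding/base64"
	"fmt"
)

// blockID renders a counter as a fixed-width string and base64-encodes it,
// so every ID has the same pre-encoding length and stays well under the
// 64-byte limit.
func blockID(n int) string {
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%08d", n)))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(blockID(i)) // MDAwMDAwMDA=, MDAwMDAwMDE=, ...
	}
}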
|  | 
 | ||||||
|  | // StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents
 | ||||||
|  | // are read from a URL.
 | ||||||
|  | //
 | ||||||
|  | // blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or
 | ||||||
|  | // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the
 | ||||||
|  | // same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source.
 | ||||||
|  | // sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the
 | ||||||
|  | // range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For
 | ||||||
|  | // more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if
 | ||||||
|  | // it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate
 | ||||||
|  | // only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to
 | ||||||
|  | // operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
 | ||||||
|  | // recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*BlockBlobStageBlockFromURLResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // stageBlockFromURLPreparer prepares the StageBlockFromURL request.
 | ||||||
|  | func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	params.Set("blockid", blockID) | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "block") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	req.Header.Set("x-ms-copy-source", sourceURL) | ||||||
|  | 	if sourceRange != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-range", *sourceRange) | ||||||
|  | 	} | ||||||
|  | 	if sourceContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfModifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfNoneMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // stageBlockFromURLResponder handles the response to the StageBlockFromURL request.
 | ||||||
|  | func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
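Sketching the wire request that stageBlockFromURLPreparer builds, again with only net/http; all URLs here are placeholders.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder destination blob URL.
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/myblob", nil)
	if err != nil {
		panic(err)
	}
	q := req.URL.Query()
	q.Set("comp", "block")
	q.Set("blockid", "MDAwMDAwMDA=") // base64 block ID, exactly as in StageBlock
	req.URL.RawQuery = q.Encode()
	req.Header.Set("x-ms-version", "2018-11-09")
	// The source must be publicly readable or carry its own SAS token.
	req.Header.Set("x-ms-copy-source", "https://other.blob.core.windows.net/src/srcblob")
	req.Header.Set("x-ms-source-range", "bytes=0-1048575") // first 1 MiB of the source
	fmt.Println(req.URL.String())
}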
|  | 
 | ||||||
|  | // Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block
 | ||||||
|  | // blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of
 | ||||||
|  | // the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a
 | ||||||
|  | // block blob, use the Put Block List operation.
 | ||||||
|  | //
 | ||||||
|  | // body is initial data; the body will be closed upon successful return. Callers should ensure closure when receiving
 | ||||||
|  | // an error. contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
 | ||||||
|  | // information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
 | ||||||
|  | // this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
 | ||||||
|  | // blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
 | ||||||
|  | // blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
 | ||||||
|  | // blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
 | ||||||
|  | // hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
 | ||||||
|  | // blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
 | ||||||
|  | // returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
 | ||||||
|  | // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
 | ||||||
|  | // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
 | ||||||
|  | // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
 | ||||||
|  | // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
 | ||||||
|  | // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
 | ||||||
|  | // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
 | ||||||
|  | // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
 | ||||||
|  | // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
 | ||||||
|  | // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
 | ||||||
|  | // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: body, | ||||||
|  | 			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*BlockBlobUploadResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // uploadPreparer prepares the Upload request.
 | ||||||
|  | func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if blobContentType != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-type", *blobContentType) | ||||||
|  | 	} | ||||||
|  | 	if blobContentEncoding != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) | ||||||
|  | 	} | ||||||
|  | 	if blobContentLanguage != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) | ||||||
|  | 	} | ||||||
|  | 	if blobContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if blobCacheControl != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) | ||||||
|  | 	} | ||||||
|  | 	if metadata != nil { | ||||||
|  | 		for k, v := range metadata { | ||||||
|  | 			req.Header.Set("x-ms-meta-"+k, v) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if blobContentDisposition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-blob-type", "BlockBlob") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // uploadResponder handles the response to the Upload request.
 | ||||||
|  | func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
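Upload is the single-request path: the whole body goes up in one PUT, distinguished from Put Block by the x-ms-blob-type header set at the end of the preparer above. A minimal sketch with placeholder names:

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	body := []byte("hello, blob")
	// Placeholder blob URL; authentication omitted.
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/myblob",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2018-11-09")
	// x-ms-blob-type distinguishes Put Blob from Put Block / Put Page.
	req.Header.Set("x-ms-blob-type", "BlockBlob")
	req.Header.Set("Content-Length", strconv.Itoa(len(body)))
	fmt.Println(req.Method, req.URL.String())
}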
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go (38 lines; generated; vendored; new file)
|  | @ -0,0 +1,38 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"net/url" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	// ServiceVersion specifies the version of the operations used in this package.
 | ||||||
|  | 	ServiceVersion = "2018-11-09" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // managementClient is the base client for Azblob.
 | ||||||
|  | type managementClient struct { | ||||||
|  | 	url url.URL | ||||||
|  | 	p   pipeline.Pipeline | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newManagementClient creates an instance of the managementClient client.
 | ||||||
|  | func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { | ||||||
|  | 	return managementClient{ | ||||||
|  | 		url: url, | ||||||
|  | 		p:   p, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // URL returns a copy of the URL for this client.
 | ||||||
|  | func (mc managementClient) URL() url.URL { | ||||||
|  | 	return mc.url | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Pipeline returns the pipeline for this client.
 | ||||||
|  | func (mc managementClient) Pipeline() pipeline.Pipeline { | ||||||
|  | 	return mc.p | ||||||
|  | } | ||||||
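managementClient is unexported; callers reach it through the exported *URL wrappers, which pair the same url.URL and pipeline.Pipeline shown above. A hedged sketch of that construction against the public azblob v0.8.0 surface; the wrapper names are as I understand them and may differ between releases.

package main

import (
	"net/url"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	u, err := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
	if err != nil {
		panic(err)
	}
	// NewPipeline wires credentials, retries, and logging into a
	// pipeline.Pipeline, the same type managementClient stores as p.
	var p pipeline.Pipeline = azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	blobURL := azblob.NewBlockBlobURL(*u, p) // wraps the unexported blockBlobClient
	_ = blobURL
}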
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go (1037 lines; generated; vendored; new file; diff suppressed because it is too large)
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go (5202 lines; generated; vendored; new file; diff suppressed because it is too large)
vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go (896 lines; generated; vendored; new file)
|  | @ -0,0 +1,896 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/base64" | ||||||
|  | 	"encoding/xml" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // pageBlobClient is the client for the PageBlob methods of the Azblob service.
 | ||||||
|  | type pageBlobClient struct { | ||||||
|  | 	managementClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newPageBlobClient creates an instance of the pageBlobClient client.
 | ||||||
|  | func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { | ||||||
|  | 	return pageBlobClient{newManagementClient(url, p)} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ClearPages the Clear Pages operation clears a set of pages from a page blob
 | ||||||
|  | //
 | ||||||
|  | // contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
 | ||||||
|  | // information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
 | ||||||
|  | // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
 | ||||||
|  | // ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
 | ||||||
|  | // less than or equal to the specified value. ifSequenceNumberLessThan is specify this header value to operate only on a
 | ||||||
|  | // blob if it has a sequence number less than the specified value. ifSequenceNumberEqualTo is specify this header value to operate
 | ||||||
|  | // only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
 | ||||||
|  | // on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
 | ||||||
|  | // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
 | ||||||
|  | // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
 | ||||||
|  | // recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobClearPagesResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // clearPagesPreparer prepares the ClearPages request.
 | ||||||
|  | func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "page") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if rangeParameter != nil { | ||||||
|  | 		req.Header.Set("x-ms-range", *rangeParameter) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThanOrEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThan != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-page-write", "clear") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // clearPagesResponder handles the response to the ClearPages request.
 | ||||||
|  | func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
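Grounded in clearPagesPreparer above, here is a sketch of the Clear Pages wire request; page blob ranges must be 512-byte aligned, so the example clears exactly the first page. Names are placeholders.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder page blob URL; authentication omitted.
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/mypageblob", nil)
	if err != nil {
		panic(err)
	}
	q := req.URL.Query()
	q.Set("comp", "page")
	req.URL.RawQuery = q.Encode()
	req.Header.Set("x-ms-version", "2018-11-09")
	req.Header.Set("x-ms-page-write", "clear") // clear rather than update
	req.Header.Set("x-ms-range", "bytes=0-511") // first 512-byte page
	req.Header.Set("Content-Length", "0")       // no body when clearing
	fmt.Println(req.URL.String())
}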
|  | 
 | ||||||
|  | // CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
 | ||||||
|  | // The snapshot is copied such that only the differential changes between the previously copied snapshot and this
 | ||||||
|  | // snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or
 | ||||||
|  | // copied from as usual. This API is supported since REST version 2016-05-31.
 | ||||||
|  | //
 | ||||||
|  | // copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that
 | ||||||
|  | // specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob
 | ||||||
|  | // must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is
 | ||||||
|  | // expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> ifModifiedSince is specify this header value to operate only on a blob if
 | ||||||
|  | // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only
 | ||||||
|  | // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate
 | ||||||
|  | // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a
 | ||||||
|  | // matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded
 | ||||||
|  | // in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobCopyIncrementalResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobCopyIncrementalResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // copyIncrementalPreparer prepares the CopyIncremental request.
 | ||||||
|  | func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "incrementalcopy") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-copy-source", copySource) | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // copyIncrementalResponder handles the response to the CopyIncremental request.
 | ||||||
|  | func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusAccepted) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
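A sketch of the request copyIncrementalPreparer produces; the x-ms-copy-source value must name a specific snapshot of the source page blob. The URLs and snapshot timestamp are placeholders.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Placeholder destination page blob.
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/destblob", nil)
	if err != nil {
		panic(err)
	}
	q := req.URL.Query()
	q.Set("comp", "incrementalcopy")
	req.URL.RawQuery = q.Encode()
	req.Header.Set("x-ms-version", "2018-11-09")
	// The copy source names a snapshot, not the live blob.
	req.Header.Set("x-ms-copy-source",
		"https://myaccount.blob.core.windows.net/mycontainer/srcblob?snapshot=2019-01-01T00:00:00.0000000Z")
	fmt.Println(req.URL.String())
}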
|  | 
 | ||||||
|  | // Create the Create operation creates a new page blob.
 | ||||||
|  | //
 | ||||||
|  | // contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page
 | ||||||
|  | // blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is
 | ||||||
|  | // expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> blobContentType is optional. Sets the blob's content type. If specified,
 | ||||||
|  | // this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the
 | ||||||
|  | // blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
 | ||||||
|  | // blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the
 | ||||||
|  | // blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this
 | ||||||
|  | // hash is not validated, as the hashes for the individual blocks were validated when each was uploaded.
 | ||||||
|  | // blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and
 | ||||||
|  | // returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the
 | ||||||
|  | // blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the
 | ||||||
|  | // destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
 | ||||||
|  | // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19,
 | ||||||
|  | // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
 | ||||||
|  | // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is
 | ||||||
|  | // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header.
 | ||||||
|  | // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
 | ||||||
|  | // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
 | ||||||
|  | // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set
 | ||||||
|  | // for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
 | ||||||
|  | // the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1
 | ||||||
|  | // KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobCreateResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // createPreparer prepares the Create request.
 | ||||||
|  | func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if blobContentType != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-type", *blobContentType) | ||||||
|  | 	} | ||||||
|  | 	if blobContentEncoding != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) | ||||||
|  | 	} | ||||||
|  | 	if blobContentLanguage != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) | ||||||
|  | 	} | ||||||
|  | 	if blobContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if blobCacheControl != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) | ||||||
|  | 	} | ||||||
|  | 	if metadata != nil { | ||||||
|  | 		for k, v := range metadata { | ||||||
|  | 			req.Header.Set("x-ms-meta-"+k, v) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if blobContentDisposition != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) | ||||||
|  | 	if blobSequenceNumber != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-blob-type", "PageBlob") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // createResponder handles the response to the Create request.
 | ||||||
|  | func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobCreateResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
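createPreparer allocates a page blob without writing any pages: the request body is empty and x-ms-blob-content-length carries the blob size, which must be a multiple of 512. A placeholder sketch:

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	const size = 1024 * 1024 // must be a multiple of 512
	// Placeholder blob URL; authentication omitted.
	req, err := http.NewRequest("PUT",
		"https://myaccount.blob.core.windows.net/mycontainer/mypageblob", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-ms-version", "2018-11-09")
	req.Header.Set("x-ms-blob-type", "PageBlob")
	req.Header.Set("x-ms-blob-content-length", strconv.Itoa(size))
	req.Header.Set("Content-Length", "0") // the request body itself is empty
	fmt.Println(req.Method, req.URL.String())
}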
|  | 
 | ||||||
|  | // GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a
 | ||||||
|  | // page blob
 | ||||||
|  | //
 | ||||||
|  | // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
 | ||||||
|  | // retrieve. For more information on working with blob snapshots, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
 | ||||||
|  | // a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
 | ||||||
|  | // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
 | ||||||
|  | // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified
 | ||||||
|  | // date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified
 | ||||||
|  | // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a
 | ||||||
|  | // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageList), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getPageRangesPreparer prepares the GetPageRanges request.
 | ||||||
|  | func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if snapshot != nil && len(*snapshot) > 0 { | ||||||
|  | 		params.Set("snapshot", *snapshot) | ||||||
|  | 	} | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "pagelist") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if rangeParameter != nil { | ||||||
|  | 		req.Header.Set("x-ms-range", *rangeParameter) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getPageRangesResponder handles the response to the GetPageRanges request.
 | ||||||
|  | func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &PageList{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were
 | ||||||
|  | // changed between target blob and previous snapshot.
 | ||||||
|  | //
 | ||||||
|  | // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to
 | ||||||
|  | // retrieve. For more information on working with blob snapshots, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating
 | ||||||
|  | // a Snapshot of a Blob.</a> timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot
 | ||||||
|  | // parameter is a DateTime value that specifies that the response will contain only pages that were changed between
 | ||||||
|  | // target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a
 | ||||||
|  | // snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots
 | ||||||
|  | // are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes
 | ||||||
|  | // of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is
 | ||||||
|  | // active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been
 | ||||||
|  | // modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if
 | ||||||
|  | // it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
 | ||||||
|  | // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
 | ||||||
|  | // logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageList), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
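As the comment above notes, prevsnapshot must name the older snapshot, while snapshot (or the base blob, when nil) is the newer side of the diff. A hedged sketch of that pairing, again assuming in-package code:

	// changedPages returns the page ranges that differ between two snapshots.
	func changedPages(ctx context.Context, client pageBlobClient, older, newer string) (*PageList, error) {
		// newer fills the snapshot parameter, older fills prevsnapshot; the
		// remaining options (timeout, range, lease, conditions) are omitted.
		return client.GetPageRangesDiff(ctx, &newer, nil, &older, nil, nil, nil, nil, nil, nil, nil)
	}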
|  | // getPageRangesDiffPreparer prepares the GetPageRangesDiff request.
 | ||||||
|  | func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if snapshot != nil && len(*snapshot) > 0 { | ||||||
|  | 		params.Set("snapshot", *snapshot) | ||||||
|  | 	} | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	if prevsnapshot != nil && len(*prevsnapshot) > 0 { | ||||||
|  | 		params.Set("prevsnapshot", *prevsnapshot) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "pagelist") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if rangeParameter != nil { | ||||||
|  | 		req.Header.Set("x-ms-range", *rangeParameter) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getPageRangesDiffResponder handles the response to the GetPageRangesDiff request.
 | ||||||
|  | func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &PageList{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Resize resizes the Blob
 | ||||||
|  | //
 | ||||||
|  | // blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must
 | ||||||
|  | // be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information,
 | ||||||
|  | // see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
 | ||||||
|  | // has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
 | ||||||
|  | // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
 | ||||||
|  | // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
 | ||||||
|  | // requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics
 | ||||||
|  | // logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobResizeResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
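Since the new size must sit on a 512-byte boundary (see the blobContentLength comment above), a caller typically rounds up before invoking Resize. A small self-contained helper, purely illustrative:

	// roundUpTo512 rounds n up to the next 512-byte page boundary,
	// e.g. 1 -> 512, 512 -> 512, 513 -> 1024.
	func roundUpTo512(n int64) int64 {
		const pageSize = 512
		return (n + pageSize - 1) / pageSize * pageSize
	}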
|  | // resizePreparer prepares the Resize request.
 | ||||||
|  | func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "properties") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // resizeResponder handles the response to the Resize request.
 | ||||||
|  | func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobResizeResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UpdateSequenceNumber updates the sequence number of the blob
 | ||||||
|  | //
 | ||||||
|  | // sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property
 | ||||||
|  | // applies to page blobs only. This property indicates how the service should modify the blob's sequence number. timeout
 | ||||||
|  | // is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it
 | ||||||
|  | // has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a
 | ||||||
|  | // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on
 | ||||||
|  | // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value.
 | ||||||
|  | // blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can use to
 | ||||||
|  | // track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a
 | ||||||
|  | // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobUpdateSequenceNumberResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
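The REST API defines three sequence number actions (update, max, increment); per its docs, x-ms-blob-sequence-number is required for update and max and must be omitted for increment. A hedged sketch, assuming the SequenceNumberActionType constants generated elsewhere in this package:

	// setSequenceNumber forces the blob's sequence number to an explicit value.
	func setSequenceNumber(ctx context.Context, client pageBlobClient, n int64) error {
		// For an "increment" action, pass nil instead of &n.
		_, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionUpdate,
			nil, nil, nil, nil, nil, nil, &n, nil)
		return err
	}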
|  | // updateSequenceNumberPreparer prepares the UpdateSequenceNumber request.
 | ||||||
|  | func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "properties") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) | ||||||
|  | 	if blobSequenceNumber != nil { | ||||||
|  | 		req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request.
 | ||||||
|  | func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadPages the Upload Pages operation writes a range of pages to a page blob
 | ||||||
|  | //
 | ||||||
|  | // body is initial data. body will be closed upon successful return. Callers should ensure closure when receiving an
 | ||||||
|  | // error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the
 | ||||||
|  | // body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more
 | ||||||
|  | // information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> rangeParameter is return only the bytes of the blob in the specified
 | ||||||
|  | // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID.
 | ||||||
|  | // ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number
 | ||||||
|  | // less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob
 | ||||||
|  | // if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate
 | ||||||
|  | // only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only
 | ||||||
|  | // on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to
 | ||||||
|  | // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value
 | ||||||
|  | // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs
 | ||||||
|  | // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is
 | ||||||
|  | // recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: body, | ||||||
|  | 			constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobUploadPagesResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
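A sketch of a single-page write showing how body, contentLength, the transactional MD5 and the x-ms-range header fit together; the offset and length must be 512-byte aligned per the REST API (in-package, illustrative; imports assumed: bytes, context, crypto/md5, fmt):

	// uploadOnePage writes len(page) bytes at the given 512-aligned offset.
	func uploadOnePage(ctx context.Context, client pageBlobClient, page []byte, offset int64) error {
		sum := md5.Sum(page) // the service validates the body against this Content-MD5
		rangeHeader := fmt.Sprintf("bytes=%d-%d", offset, offset+int64(len(page))-1)
		_, err := client.UploadPages(ctx, bytes.NewReader(page), int64(len(page)), sum[:],
			nil, &rangeHeader, nil, nil, nil, nil, nil, nil, nil, nil, nil)
		return err
	}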
|  | // uploadPagesPreparer prepares the UploadPages request.
 | ||||||
|  | func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "page") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	if transactionalContentMD5 != nil { | ||||||
|  | 		req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	if rangeParameter != nil { | ||||||
|  | 		req.Header.Set("x-ms-range", *rangeParameter) | ||||||
|  | 	} | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThanOrEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThan != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-page-write", "update") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // uploadPagesResponder handles the response to the UploadPages request.
 | ||||||
|  | func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // UploadPagesFromURL the Upload Pages From URL operation writes a range of pages to a page blob where the contents are read
 | ||||||
|  | // from a URL
 | ||||||
|  | //
 | ||||||
|  | // sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The
 | ||||||
|  | // length of this range should match the ContentLength header and x-ms-range/Range destination range header.
 | ||||||
|  | // contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be
 | ||||||
|  | // written. The range should be 512-byte aligned and range-end is required. sourceContentMD5 is specify the md5 calculated
 | ||||||
|  | // for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in
 | ||||||
|  | // seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> leaseID is if specified, the operation only succeeds if the resource's
 | ||||||
|  | // lease is active and matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only
 | ||||||
|  | // on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this
 | ||||||
|  | // header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo
 | ||||||
|  | // is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is
 | ||||||
|  | // specify this header value to operate only on a blob if it has been modified since the specified date/time.
 | ||||||
|  | // ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
 | ||||||
|  | // specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is
 | ||||||
|  | // specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this
 | ||||||
|  | // header value to operate only on a blob if it has been modified since the specified date/time.
 | ||||||
|  | // sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the
 | ||||||
|  | // specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value.
 | ||||||
|  | // sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides
 | ||||||
|  | // a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
 | ||||||
|  | // analytics logging is enabled.
 | ||||||
|  | func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*PageBlobUploadPagesFromURLResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
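Note that this variant carries no request body: the service pulls the bytes from sourceURL, so a zero contentLength is assumed in the sketch below, while the source and destination ranges must describe the same number of bytes. An illustrative in-package sketch (sourceURL is hypothetical and must be readable by the service, e.g. via a SAS token):

	// copyPagesFromURL copies count bytes from sourceURL into the same offset here.
	func copyPagesFromURL(ctx context.Context, client pageBlobClient, sourceURL string, offset, count int64) error {
		src := fmt.Sprintf("bytes=%d-%d", offset, offset+count-1)
		dst := src // equal-length, 512-byte-aligned source and destination ranges
		_, err := client.UploadPagesFromURL(ctx, sourceURL, src, 0 /* no request body */, dst,
			nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
		return err
	}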
|  | // uploadPagesFromURLPreparer prepares the UploadPagesFromURL request.
 | ||||||
|  | func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "page") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-copy-source", sourceURL) | ||||||
|  | 	req.Header.Set("x-ms-source-range", sourceRange) | ||||||
|  | 	if sourceContentMD5 != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) | ||||||
|  | 	req.Header.Set("x-ms-range", rangeParameter) | ||||||
|  | 	if leaseID != nil { | ||||||
|  | 		req.Header.Set("x-ms-lease-id", *leaseID) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThanOrEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberLessThan != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifSequenceNumberEqualTo != nil { | ||||||
|  | 		req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) | ||||||
|  | 	} | ||||||
|  | 	if ifModifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if ifMatch != nil { | ||||||
|  | 		req.Header.Set("If-Match", string(*ifMatch)) | ||||||
|  | 	} | ||||||
|  | 	if ifNoneMatch != nil { | ||||||
|  | 		req.Header.Set("If-None-Match", string(*ifNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfModifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfUnmodifiedSince != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) | ||||||
|  | 	} | ||||||
|  | 	if sourceIfNoneMatch != nil { | ||||||
|  | 		req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("x-ms-page-write", "update") | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request.
 | ||||||
|  | func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusCreated) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
							
								
								
									
74	vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go (generated, vendored, new file)
|  | @ -0,0 +1,74 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/xml" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"io/ioutil" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type responder func(resp pipeline.Response) (result pipeline.Response, err error) | ||||||
|  | 
 | ||||||
|  | // responderPolicyFactory is a factory capable of creating responder pipeline policies.
 | ||||||
|  | type responderPolicyFactory struct { | ||||||
|  | 	responder responder | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // New creates a responder policy.
 | ||||||
|  | func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { | ||||||
|  | 	return responderPolicy{next: next, responder: arpf.responder} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type responderPolicy struct { | ||||||
|  | 	next      pipeline.Policy | ||||||
|  | 	responder responder | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Do sends the request to the service and validates/deserializes the HTTP response.
 | ||||||
|  | func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { | ||||||
|  | 	resp, err := arp.next.Do(ctx, request) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return resp, err | ||||||
|  | 	} | ||||||
|  | 	return arp.responder(resp) | ||||||
|  | } | ||||||
|  | 
 | ||||||
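This is the hook every generated operation uses: a call like client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) appends the typed responder to the policy chain, so it runs after the remaining policies and the HTTP round trip complete and turns the raw response into a typed one. A minimal custom responder sketch (in-package, illustrative only):

	// okOnly accepts a 200 and passes the response through untouched.
	func okOnly(resp pipeline.Response) (pipeline.Response, error) {
		return resp, validateResponse(resp, http.StatusOK)
	}

	// usage, given a pipeline p and a prepared request req:
	//   resp, err := p.Do(ctx, responderPolicyFactory{responder: okOnly}, req)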
|  | // validateResponse checks an HTTP response's status code against a legal set of codes.
 | ||||||
|  | // If the response code is not legal, then validateResponse reads all of the response's body
 | ||||||
|  | // (containing error information) and returns a response error.
 | ||||||
|  | func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return NewResponseError(nil, nil, "nil response") | ||||||
|  | 	} | ||||||
|  | 	responseCode := resp.Response().StatusCode | ||||||
|  | 	for _, i := range successStatusCodes { | ||||||
|  | 		if i == responseCode { | ||||||
|  | 			return nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	// only close the body in the failure case. in the
 | ||||||
|  | 	// success case responders will close the body as required.
 | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	// the service code, description and details will be populated during unmarshalling
 | ||||||
|  | 	responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		if err = xml.Unmarshal(b, &responseError); err != nil { | ||||||
|  | 			return NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return responseError | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // removes any BOM from the byte slice
 | ||||||
|  | func removeBOM(b []byte) []byte { | ||||||
|  | 	// UTF8
 | ||||||
|  | 	return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) | ||||||
|  | } | ||||||
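Services occasionally prepend a UTF-8 byte order mark to XML bodies, which can trip up encoding/xml; hence the trim above. A quick illustration:

	// the three BOM bytes EF BB BF are stripped, anything else passes through:
	removeBOM([]byte("\xef\xbb\xbf<PageList/>")) // -> []byte("<PageList/>")
	removeBOM([]byte("<PageList/>"))             // -> unchanged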
							
								
								
									
95	vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go (generated, vendored, new file)
|  | @ -0,0 +1,95 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"net" | ||||||
|  | 	"net/http" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // if you want to provide custom error handling, set this variable to your constructor function
 | ||||||
|  | var responseErrorFactory func(cause error, response *http.Response, description string) error | ||||||
|  | 
 | ||||||
|  | // ResponseError identifies a responder-generated network or response parsing error.
 | ||||||
|  | type ResponseError interface { | ||||||
|  | 	// Error exposes the Error(), Temporary() and Timeout() methods.
 | ||||||
|  | 	net.Error // Includes the Go error interface
 | ||||||
|  | 	// Response returns the HTTP response. You may examine this but you should not modify it.
 | ||||||
|  | 	Response() *http.Response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewResponseError creates an error object that implements the error interface.
 | ||||||
|  | func NewResponseError(cause error, response *http.Response, description string) error { | ||||||
|  | 	if responseErrorFactory != nil { | ||||||
|  | 		return responseErrorFactory(cause, response, description) | ||||||
|  | 	} | ||||||
|  | 	return &responseError{ | ||||||
|  | 		ErrorNode:   pipeline.ErrorNode{}.Initialize(cause, 3), | ||||||
|  | 		response:    response, | ||||||
|  | 		description: description, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
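Because responseErrorFactory is unexported, only code inside this package can install a replacement; once set, every generated error is routed through it. A hedged sketch of such a hook:

	// myError is a hypothetical in-package error type capturing the same fields.
	type myError struct {
		cause       error
		resp        *http.Response
		description string
	}

	func (e *myError) Error() string { return "azblob: " + e.description }

	func init() {
		responseErrorFactory = func(cause error, response *http.Response, description string) error {
			// cause may be nil, e.g. for a bare non-success status code.
			return &myError{cause: cause, resp: response, description: description}
		}
	}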
|  | // responseError is the internal struct that implements the public ResponseError interface.
 | ||||||
|  | type responseError struct { | ||||||
|  | 	pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause
 | ||||||
|  | 	response           *http.Response | ||||||
|  | 	description        string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error implements the error interface's Error method to return a string representation of the error.
 | ||||||
|  | func (e *responseError) Error() string { | ||||||
|  | 	b := &bytes.Buffer{} | ||||||
|  | 	fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) | ||||||
|  | 	fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) | ||||||
|  | 	s := b.String() | ||||||
|  | 	return e.ErrorNode.Error(s) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Response implements the ResponseError interface's method to return the HTTP response.
 | ||||||
|  | func (e *responseError) Response() *http.Response { | ||||||
|  | 	return e.response | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RFC7807 PROBLEM ------------------------------------------------------------------------------------
 | ||||||
|  | // RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members.
 | ||||||
|  | /*type RFC7807Problem struct { | ||||||
|  | 	// Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation).
 | ||||||
|  | 	typeURI string // Should default to "about:blank"
 | ||||||
|  | 	// Optional: Short, human-readable summary (maybe localized).
 | ||||||
|  | 	title string | ||||||
|  | 	// Optional: HTTP status code generated by the origin server
 | ||||||
|  | 	status int | ||||||
|  | 	// Optional: Human-readable explanation for this problem occurrence.
 | ||||||
|  | 	// Should help client correct the problem. Clients should NOT parse this string.
 | ||||||
|  | 	detail string | ||||||
|  | 	// Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
 | ||||||
|  | 	instance string | ||||||
|  | } | ||||||
|  | // NewRFC7807Problem ...
 | ||||||
|  | func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error { | ||||||
|  | 	return &RFC7807Problem{ | ||||||
|  | 		typeURI: typeURI, | ||||||
|  | 		status:  status, | ||||||
|  | 		title:   fmt.Sprintf(titleFormat, a...), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | // Error returns the error information as a string.
 | ||||||
|  | func (e *RFC7807Problem) Error() string { | ||||||
|  | 	return e.title | ||||||
|  | } | ||||||
|  | // TypeURI ...
 | ||||||
|  | func (e *RFC7807Problem) TypeURI() string { | ||||||
|  | 	if e.typeURI == "" { | ||||||
|  | 		e.typeURI = "about:blank" | ||||||
|  | 	} | ||||||
|  | 	return e.typeURI | ||||||
|  | } | ||||||
|  | // Members ...
 | ||||||
|  | func (e *RFC7807Problem) Members() (status int, title, detail, instance string) { | ||||||
|  | 	return e.status, e.title, e.detail, e.instance | ||||||
|  | }*/ | ||||||
							
								
								
									
467	vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go (generated, vendored, new file)
|  | @ -0,0 +1,467 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bytes" | ||||||
|  | 	"context" | ||||||
|  | 	"encoding/xml" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 	"strconv" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // serviceClient is the client for the Service methods of the Azblob service.
 | ||||||
|  | type serviceClient struct { | ||||||
|  | 	managementClient | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newServiceClient creates an instance of the serviceClient client.
 | ||||||
|  | func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { | ||||||
|  | 	return serviceClient{newManagementClient(url, p)} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetAccountInfo returns the sku name and account kind
 | ||||||
|  | func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { | ||||||
|  | 	req, err := client.getAccountInfoPreparer() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*ServiceGetAccountInfoResponse), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getAccountInfoPreparer prepares the GetAccountInfo request.
 | ||||||
|  | func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	params.Set("restype", "account") | ||||||
|  | 	params.Set("comp", "properties") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getAccountInfoResponder handles the response to the GetAccountInfo request.
 | ||||||
|  | func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
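The account info arrives as response headers rather than a body, which is why the responder just drains and closes it. Inside the package one could read them off the raw response (header names per the REST docs; the rawResponse field is assumed from the generated models file; illustrative only):

	info, err := client.GetAccountInfo(ctx)
	if err != nil {
		return err
	}
	sku := info.rawResponse.Header.Get("x-ms-sku-name")
	kind := info.rawResponse.Header.Get("x-ms-account-kind")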
|  | // GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics
 | ||||||
|  | // and CORS (Cross-Origin Resource Sharing) rules.
 | ||||||
|  | //
 | ||||||
|  | // timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
 | ||||||
|  | // character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getPropertiesPreparer(timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*StorageServiceProperties), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getPropertiesPreparer prepares the GetProperties request.
 | ||||||
|  | func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("restype", "service") | ||||||
|  | 	params.Set("comp", "properties") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getPropertiesResponder handles the response to the GetProperties request.
 | ||||||
|  | func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &StorageServiceProperties{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the
 | ||||||
|  | // secondary location endpoint when read-access geo-redundant replication is enabled for the storage account.
 | ||||||
|  | //
 | ||||||
|  | // timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> requestID is provides a client-generated, opaque value with a 1 KB
 | ||||||
|  | // character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getStatisticsPreparer(timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*StorageServiceStats), err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getStatisticsPreparer prepares the GetStatistics request.
 | ||||||
|  | func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("restype", "service") | ||||||
|  | 	params.Set("comp", "stats") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getStatisticsResponder handles the response to the GetStatistics request.
 | ||||||
|  | func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &StorageServiceStats{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using
 | ||||||
|  | // bearer token authentication.
 | ||||||
|  | //
 | ||||||
|  | // timeout is the timeout parameter is expressed in seconds. For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
 | ||||||
|  | // character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*UserDelegationKey), err | ||||||
|  | } | ||||||
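KeyInfo carries the validity window of the requested key. A within-package sketch requesting a key good for one hour; the Start/Expiry field names and their ISO 8601 string format are assumptions taken from the models file, which is not part of this excerpt:

	// Sketch: ask for a user delegation key valid from now until one hour out.
	// Requires the pipeline to be authenticated with a bearer token.
	func delegationKeyExample(ctx context.Context, client serviceClient) (*UserDelegationKey, error) {
		now := time.Now().UTC()
		info := KeyInfo{
			Start:  now.Format("2006-01-02T15:04:05Z"),
			Expiry: now.Add(time.Hour).Format("2006-01-02T15:04:05Z"),
		}
		return client.GetUserDelegationKey(ctx, info, nil, nil)
	}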
|  | 
 | ||||||
|  | // getUserDelegationKeyPreparer prepares the GetUserDelegationKey request.
 | ||||||
|  | func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("POST", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("restype", "service") | ||||||
|  | 	params.Set("comp", "userdelegationkey") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	b, err := xml.Marshal(keyInfo) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to marshal request body") | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("Content-Type", "application/xml") | ||||||
|  | 	err = req.SetBody(bytes.NewReader(b)) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to set request body") | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request.
 | ||||||
|  | func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &UserDelegationKey{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ListContainersSegment returns a list of the containers under the specified
 | ||||||
|  | // account.
 | ||||||
|  | //
 | ||||||
|  | // prefix filters the results to return only containers whose name begins with the specified prefix. marker is a
 | ||||||
|  | // string value that identifies the portion of the list of containers to be returned with the next listing operation.
 | ||||||
|  | // The operation returns the NextMarker value within the response body if the listing operation did not return all
 | ||||||
|  | // containers remaining to be listed with the current page. The NextMarker value can be used as the value for the
 | ||||||
|  | // marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the
 | ||||||
|  | // client. maxresults specifies the maximum number of containers to return. If the request does not specify
 | ||||||
|  | // maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the
 | ||||||
|  | // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the
 | ||||||
|  | // remainder of the results. For this reason, it is possible that the service will return fewer results than specified
 | ||||||
|  | // by maxresults, or than the default of 5000. include specifies that the container's
 | ||||||
|  | // metadata be returned as part of the response body. timeout is the timeout parameter, expressed in seconds. For
 | ||||||
|  | // more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
 | ||||||
|  | // character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: maxresults, | ||||||
|  | 			constraints: []constraint{{target: "maxresults", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*ListContainersSegmentResponse), err | ||||||
|  | } | ||||||
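The marker/NextMarker handshake described above reduces to a simple loop: pass nil on the first call, then feed each response's NextMarker back in until the listing is exhausted. A within-package sketch; the exact shape of NextMarker on the generated response lives in the models file outside this excerpt, so a plain *string is assumed here:

	// Sketch: walk every page of the container listing.
	func listAllContainers(ctx context.Context, client serviceClient) error {
		var marker *string
		for {
			page, err := client.ListContainersSegment(ctx, nil, marker, nil,
				ListContainersIncludeNone, nil, nil)
			if err != nil {
				return err
			}
			// ... consume the containers in this page ...
			if page.NextMarker == nil || *page.NextMarker == "" {
				return nil // no continuation token: all containers listed
			}
			marker = page.NextMarker
		}
	}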
|  | 
 | ||||||
|  | // listContainersSegmentPreparer prepares the ListContainersSegment request.
 | ||||||
|  | func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("GET", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if prefix != nil && len(*prefix) > 0 { | ||||||
|  | 		params.Set("prefix", *prefix) | ||||||
|  | 	} | ||||||
|  | 	if marker != nil && len(*marker) > 0 { | ||||||
|  | 		params.Set("marker", *marker) | ||||||
|  | 	} | ||||||
|  | 	if maxresults != nil { | ||||||
|  | 		params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) | ||||||
|  | 	} | ||||||
|  | 	if include != ListContainersIncludeNone { | ||||||
|  | 		params.Set("include", string(include)) | ||||||
|  | 	} | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("comp", "list") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // listContainersSegmentResponder handles the response to the ListContainersSegment request.
 | ||||||
|  | func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	result := &ListContainersSegmentResponse{rawResponse: resp.Response()} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	defer resp.Response().Body.Close() | ||||||
|  | 	b, err := ioutil.ReadAll(resp.Response().Body) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return result, err | ||||||
|  | 	} | ||||||
|  | 	if len(b) > 0 { | ||||||
|  | 		b = removeBOM(b) | ||||||
|  | 		err = xml.Unmarshal(b, result) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return result, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage
 | ||||||
|  | // Analytics and CORS (Cross-Origin Resource Sharing) rules
 | ||||||
|  | //
 | ||||||
|  | // storageServiceProperties is the StorageService properties. timeout is the timeout parameter, expressed in seconds.
 | ||||||
|  | // For more information, see <a
 | ||||||
|  | // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting
 | ||||||
|  | // Timeouts for Blob Service Operations.</a> requestID provides a client-generated, opaque value with a 1 KB
 | ||||||
|  | // character limit that is recorded in the analytics logs when storage analytics logging is enabled.
 | ||||||
|  | func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { | ||||||
|  | 	if err := validate([]validation{ | ||||||
|  | 		{targetValue: storageServiceProperties, | ||||||
|  | 			constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, | ||||||
|  | 					chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, | ||||||
|  | 						chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, | ||||||
|  | 					}}, | ||||||
|  | 				}}, | ||||||
|  | 				{target: "storageServiceProperties.HourMetrics", name: null, rule: false, | ||||||
|  | 					chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, | ||||||
|  | 						chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, | ||||||
|  | 							chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, | ||||||
|  | 						}}, | ||||||
|  | 					}}, | ||||||
|  | 				{target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, | ||||||
|  | 					chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, | ||||||
|  | 						chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, | ||||||
|  | 							chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, | ||||||
|  | 						}}, | ||||||
|  | 					}}, | ||||||
|  | 				{target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, | ||||||
|  | 					chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, | ||||||
|  | 						chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, | ||||||
|  | 					}}}}, | ||||||
|  | 		{targetValue: timeout, | ||||||
|  | 			constraints: []constraint{{target: "timeout", name: null, rule: false, | ||||||
|  | 				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return resp.(*ServiceSetPropertiesResponse), err | ||||||
|  | } | ||||||
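The validation chain above insists that any retention policy that is present carries Days >= 1. A hedged within-package sketch of a conforming request; the struct shapes are inferred from the constraint targets (storageServiceProperties.Logging.RetentionPolicy.Days, ...), and the real models carry additional required fields that are elided here:

	// Sketch: enable a 7-day retention policy on the logging settings.
	func setRetentionExample(ctx context.Context, client serviceClient) error {
		days := int32(7) // must be >= 1 or validate() rejects the call client-side
		props := StorageServiceProperties{
			Logging: &Logging{
				// Version and the Read/Write/Delete flags exist on the real
				// model too; only the retention shape is shown here.
				RetentionPolicy: RetentionPolicy{Enabled: true, Days: &days},
			},
		}
		_, err := client.SetProperties(ctx, props, nil, nil)
		return err
	}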
|  | 
 | ||||||
|  | // setPropertiesPreparer prepares the SetProperties request.
 | ||||||
|  | func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { | ||||||
|  | 	req, err := pipeline.NewRequest("PUT", client.url, nil) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to create request") | ||||||
|  | 	} | ||||||
|  | 	params := req.URL.Query() | ||||||
|  | 	if timeout != nil { | ||||||
|  | 		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) | ||||||
|  | 	} | ||||||
|  | 	params.Set("restype", "service") | ||||||
|  | 	params.Set("comp", "properties") | ||||||
|  | 	req.URL.RawQuery = params.Encode() | ||||||
|  | 	req.Header.Set("x-ms-version", ServiceVersion) | ||||||
|  | 	if requestID != nil { | ||||||
|  | 		req.Header.Set("x-ms-client-request-id", *requestID) | ||||||
|  | 	} | ||||||
|  | 	b, err := xml.Marshal(storageServiceProperties) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to marshal request body") | ||||||
|  | 	} | ||||||
|  | 	req.Header.Set("Content-Type", "application/xml") | ||||||
|  | 	err = req.SetBody(bytes.NewReader(b)) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return req, pipeline.NewError(err, "failed to set request body") | ||||||
|  | 	} | ||||||
|  | 	return req, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // setPropertiesResponder handles the response to the SetProperties request.
 | ||||||
|  | func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { | ||||||
|  | 	err := validateResponse(resp, http.StatusOK, http.StatusAccepted) | ||||||
|  | 	if resp == nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	io.Copy(ioutil.Discard, resp.Response().Body) | ||||||
|  | 	resp.Response().Body.Close() | ||||||
|  | 	return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err | ||||||
|  | } | ||||||
							
								
								
									
367 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go generated vendored Normal file
|  | @ -0,0 +1,367 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"github.com/Azure/azure-pipeline-go/pipeline" | ||||||
|  | 	"reflect" | ||||||
|  | 	"regexp" | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // constraint stores the constraint name, target field name,
 | ||||||
|  | // rule, and chain validations.
 | ||||||
|  | type constraint struct { | ||||||
|  | 	// Target field name for validation.
 | ||||||
|  | 	target string | ||||||
|  | 
 | ||||||
|  | 	// Constraint name, e.g. MinLength, MaxLength, Pattern, etc.
 | ||||||
|  | 	name string | ||||||
|  | 
 | ||||||
|  | 	// Rule for the constraint, e.g. greater than 10, less than 5, etc.
 | ||||||
|  | 	rule interface{} | ||||||
|  | 
 | ||||||
|  | 	// Chain validations for struct type
 | ||||||
|  | 	chain []constraint | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // validation stores parameter-wise validation.
 | ||||||
|  | type validation struct { | ||||||
|  | 	targetValue interface{} | ||||||
|  | 	constraints []constraint | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Constraint list
 | ||||||
|  | const ( | ||||||
|  | 	empty            = "Empty" | ||||||
|  | 	null             = "Null" | ||||||
|  | 	readOnly         = "ReadOnly" | ||||||
|  | 	pattern          = "Pattern" | ||||||
|  | 	maxLength        = "MaxLength" | ||||||
|  | 	minLength        = "MinLength" | ||||||
|  | 	maxItems         = "MaxItems" | ||||||
|  | 	minItems         = "MinItems" | ||||||
|  | 	multipleOf       = "MultipleOf" | ||||||
|  | 	uniqueItems      = "UniqueItems" | ||||||
|  | 	inclusiveMaximum = "InclusiveMaximum" | ||||||
|  | 	exclusiveMaximum = "ExclusiveMaximum" | ||||||
|  | 	exclusiveMinimum = "ExclusiveMinimum" | ||||||
|  | 	inclusiveMinimum = "InclusiveMinimum" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // validate checks the constraints on each parameter
 | ||||||
|  | // passed in the validation array.
 | ||||||
|  | func validate(m []validation) error { | ||||||
|  | 	for _, item := range m { | ||||||
|  | 		v := reflect.ValueOf(item.targetValue) | ||||||
|  | 		for _, constraint := range item.constraints { | ||||||
|  | 			var err error | ||||||
|  | 			switch v.Kind() { | ||||||
|  | 			case reflect.Ptr: | ||||||
|  | 				err = validatePtr(v, constraint) | ||||||
|  | 			case reflect.String: | ||||||
|  | 				err = validateString(v, constraint) | ||||||
|  | 			case reflect.Struct: | ||||||
|  | 				err = validateStruct(v, constraint) | ||||||
|  | 			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||||
|  | 				err = validateInt(v, constraint) | ||||||
|  | 			case reflect.Float32, reflect.Float64: | ||||||
|  | 				err = validateFloat(v, constraint) | ||||||
|  | 			case reflect.Array, reflect.Slice, reflect.Map: | ||||||
|  | 				err = validateArrayMap(v, constraint) | ||||||
|  | 			default: | ||||||
|  | 				err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) | ||||||
|  | 			} | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
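Every call site earlier in this file builds the same shape: a null constraint with rule false (the pointer may be nil) whose chain applies inclusiveMinimum once the pointer is set. A compact within-package restatement of that pattern:

	// The "optional pointer with a minimum" pattern used by the operations above.
	func validateTimeoutExample(timeout *int32) error {
		return validate([]validation{
			{targetValue: timeout,
				constraints: []constraint{{target: "timeout", name: null, rule: false,
					chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}})
	}

	// validateTimeoutExample(nil) returns nil: the parameter is optional.
	// For t := int32(-1), validateTimeoutExample(&t) fails with
	// "value must be greater than or equal to 0".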
|  | 
 | ||||||
|  | func validateStruct(x reflect.Value, v constraint, name ...string) error { | ||||||
|  | 	// Get the field name from the target name, which is in the format a.b.c
 | ||||||
|  | 	s := strings.Split(v.target, ".") | ||||||
|  | 	f := x.FieldByName(s[len(s)-1]) | ||||||
|  | 	if isZero(f) { | ||||||
|  | 		return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) | ||||||
|  | 	} | ||||||
|  | 	err := validate([]validation{ | ||||||
|  | 		{ | ||||||
|  | 			targetValue: getInterfaceValue(f), | ||||||
|  | 			constraints: []constraint{v}, | ||||||
|  | 		}, | ||||||
|  | 	}) | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func validatePtr(x reflect.Value, v constraint) error { | ||||||
|  | 	if v.name == readOnly { | ||||||
|  | 		if !x.IsNil() { | ||||||
|  | 			return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	if x.IsNil() { | ||||||
|  | 		return checkNil(x, v) | ||||||
|  | 	} | ||||||
|  | 	if v.chain != nil { | ||||||
|  | 		return validate([]validation{ | ||||||
|  | 			{ | ||||||
|  | 				targetValue: getInterfaceValue(x.Elem()), | ||||||
|  | 				constraints: v.chain, | ||||||
|  | 			}, | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func validateInt(x reflect.Value, v constraint) error { | ||||||
|  | 	i := x.Int() | ||||||
|  | 	r, ok := v.rule.(int) | ||||||
|  | 	if !ok { | ||||||
|  | 		return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 	} | ||||||
|  | 	switch v.name { | ||||||
|  | 	case multipleOf: | ||||||
|  | 		if i%int64(r) != 0 { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case exclusiveMinimum: | ||||||
|  | 		if i <= int64(r) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case exclusiveMaximum: | ||||||
|  | 		if i >= int64(r) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be less than %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case inclusiveMinimum: | ||||||
|  | 		if i < int64(r) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case inclusiveMaximum: | ||||||
|  | 		if i > int64(r) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) | ||||||
|  | 		} | ||||||
|  | 	default: | ||||||
|  | 		return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func validateFloat(x reflect.Value, v constraint) error { | ||||||
|  | 	f := x.Float() | ||||||
|  | 	r, ok := v.rule.(float64) | ||||||
|  | 	if !ok { | ||||||
|  | 		return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 	} | ||||||
|  | 	switch v.name { | ||||||
|  | 	case exclusiveMinimum: | ||||||
|  | 		if f <= r { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case exclusiveMaximum: | ||||||
|  | 		if f >= r { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be less than %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case inclusiveMinimum: | ||||||
|  | 		if f < r { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) | ||||||
|  | 		} | ||||||
|  | 	case inclusiveMaximum: | ||||||
|  | 		if f > r { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) | ||||||
|  | 		} | ||||||
|  | 	default: | ||||||
|  | 		return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func validateString(x reflect.Value, v constraint) error { | ||||||
|  | 	s := x.String() | ||||||
|  | 	switch v.name { | ||||||
|  | 	case empty: | ||||||
|  | 		if len(s) == 0 { | ||||||
|  | 			return checkEmpty(x, v) | ||||||
|  | 		} | ||||||
|  | 	case pattern: | ||||||
|  | 		reg, err := regexp.Compile(v.rule.(string)) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return createError(x, v, err.Error()) | ||||||
|  | 		} | ||||||
|  | 		if !reg.MatchString(s) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) | ||||||
|  | 		} | ||||||
|  | 	case maxLength: | ||||||
|  | 		if _, ok := v.rule.(int); !ok { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 		} | ||||||
|  | 		if len(s) > v.rule.(int) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) | ||||||
|  | 		} | ||||||
|  | 	case minLength: | ||||||
|  | 		if _, ok := v.rule.(int); !ok { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 		} | ||||||
|  | 		if len(s) < v.rule.(int) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) | ||||||
|  | 		} | ||||||
|  | 	case readOnly: | ||||||
|  | 		if len(s) > 0 { | ||||||
|  | 			return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") | ||||||
|  | 		} | ||||||
|  | 	default: | ||||||
|  | 		return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) | ||||||
|  | 	} | ||||||
|  | 	if v.chain != nil { | ||||||
|  | 		return validate([]validation{ | ||||||
|  | 			{ | ||||||
|  | 				targetValue: getInterfaceValue(x), | ||||||
|  | 				constraints: v.chain, | ||||||
|  | 			}, | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func validateArrayMap(x reflect.Value, v constraint) error { | ||||||
|  | 	switch v.name { | ||||||
|  | 	case null: | ||||||
|  | 		if x.IsNil() { | ||||||
|  | 			return checkNil(x, v) | ||||||
|  | 		} | ||||||
|  | 	case empty: | ||||||
|  | 		if x.IsNil() || x.Len() == 0 { | ||||||
|  | 			return checkEmpty(x, v) | ||||||
|  | 		} | ||||||
|  | 	case maxItems: | ||||||
|  | 		if _, ok := v.rule.(int); !ok { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 		} | ||||||
|  | 		if x.Len() > v.rule.(int) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) | ||||||
|  | 		} | ||||||
|  | 	case minItems: | ||||||
|  | 		if _, ok := v.rule.(int); !ok { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 		} | ||||||
|  | 		if x.Len() < v.rule.(int) { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) | ||||||
|  | 		} | ||||||
|  | 	case uniqueItems: | ||||||
|  | 		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { | ||||||
|  | 			if !checkForUniqueInArray(x) { | ||||||
|  | 				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) | ||||||
|  | 			} | ||||||
|  | 		} else if x.Kind() == reflect.Map { | ||||||
|  | 			if !checkForUniqueInMap(x) { | ||||||
|  | 				return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) | ||||||
|  | 		} | ||||||
|  | 	case readOnly: | ||||||
|  | 		if x.Len() != 0 { | ||||||
|  | 			return createError(x, v, "readonly parameter; must send as nil or empty in request") | ||||||
|  | 		} | ||||||
|  | 	case pattern: | ||||||
|  | 		reg, err := regexp.Compile(v.rule.(string)) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return createError(x, v, err.Error()) | ||||||
|  | 		} | ||||||
|  | 		keys := x.MapKeys() | ||||||
|  | 		for _, k := range keys { | ||||||
|  | 			if !reg.MatchString(k.String()) { | ||||||
|  | 				return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	default: | ||||||
|  | 		return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) | ||||||
|  | 	} | ||||||
|  | 	if v.chain != nil { | ||||||
|  | 		return validate([]validation{ | ||||||
|  | 			{ | ||||||
|  | 				targetValue: getInterfaceValue(x), | ||||||
|  | 				constraints: v.chain, | ||||||
|  | 			}, | ||||||
|  | 		}) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func checkNil(x reflect.Value, v constraint) error { | ||||||
|  | 	if _, ok := v.rule.(bool); !ok { | ||||||
|  | 		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 	} | ||||||
|  | 	if v.rule.(bool) { | ||||||
|  | 		return createError(x, v, "value can not be null; required parameter") | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func checkEmpty(x reflect.Value, v constraint) error { | ||||||
|  | 	if _, ok := v.rule.(bool); !ok { | ||||||
|  | 		return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) | ||||||
|  | 	} | ||||||
|  | 	if v.rule.(bool) { | ||||||
|  | 		return createError(x, v, "value can not be null or empty; required parameter") | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func checkForUniqueInArray(x reflect.Value) bool { | ||||||
|  | 	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	arrOfInterface := make([]interface{}, x.Len()) | ||||||
|  | 	for i := 0; i < x.Len(); i++ { | ||||||
|  | 		arrOfInterface[i] = x.Index(i).Interface() | ||||||
|  | 	} | ||||||
|  | 	m := make(map[interface{}]bool) | ||||||
|  | 	for _, val := range arrOfInterface { | ||||||
|  | 		if m[val] { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		m[val] = true | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func checkForUniqueInMap(x reflect.Value) bool { | ||||||
|  | 	if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	mapOfInterface := make(map[interface{}]interface{}, x.Len()) | ||||||
|  | 	keys := x.MapKeys() | ||||||
|  | 	for _, k := range keys { | ||||||
|  | 		mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() | ||||||
|  | 	} | ||||||
|  | 	m := make(map[interface{}]bool) | ||||||
|  | 	for _, val := range mapOfInterface { | ||||||
|  | 		if m[val] { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 		m[val] = true | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getInterfaceValue(x reflect.Value) interface{} { | ||||||
|  | 	if x.Kind() == reflect.Invalid { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	return x.Interface() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func isZero(x interface{}) bool { | ||||||
|  | 	return x == reflect.Zero(reflect.TypeOf(x)).Interface() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func createError(x reflect.Value, v constraint, message string) error { | ||||||
|  | 	return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", | ||||||
|  | 		v.target, v.name, getInterfaceValue(x), message)) | ||||||
|  | } | ||||||
							
								
								
									
14 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go generated vendored Normal file
|  | @ -0,0 +1,14 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | // Code generated by Microsoft (R) AutoRest Code Generator.
 | ||||||
|  | // Changes may cause incorrect behavior and will be lost if the code is regenerated.
 | ||||||
|  | 
 | ||||||
|  | // UserAgent returns the UserAgent string to use when sending http.Requests.
 | ||||||
|  | func UserAgent() string { | ||||||
|  | 	return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09" | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Version returns the semantic version (see http://semver.org) of the client.
 | ||||||
|  | func Version() string { | ||||||
|  | 	return "0.0.0" | ||||||
|  | } | ||||||
							
								
								
									
242 vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go generated vendored Normal file
|  | @ -0,0 +1,242 @@ | ||||||
|  | package azblob | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"io" | ||||||
|  | 	"net/http" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BlobHTTPHeaders contains read/writeable blob properties.
 | ||||||
|  | type BlobHTTPHeaders struct { | ||||||
|  | 	ContentType        string | ||||||
|  | 	ContentMD5         []byte | ||||||
|  | 	ContentEncoding    string | ||||||
|  | 	ContentLanguage    string | ||||||
|  | 	ContentDisposition string | ||||||
|  | 	CacheControl       string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewHTTPHeaders returns the user-modifiable properties for this blob.
 | ||||||
|  | func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders { | ||||||
|  | 	return BlobHTTPHeaders{ | ||||||
|  | 		ContentType:        bgpr.ContentType(), | ||||||
|  | 		ContentEncoding:    bgpr.ContentEncoding(), | ||||||
|  | 		ContentLanguage:    bgpr.ContentLanguage(), | ||||||
|  | 		ContentDisposition: bgpr.ContentDisposition(), | ||||||
|  | 		CacheControl:       bgpr.CacheControl(), | ||||||
|  | 		ContentMD5:         bgpr.ContentMD5(), | ||||||
|  | 	} | ||||||
|  | } | ||||||
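NewHTTPHeaders exists so callers can do a read-modify-write of blob properties without clobbering fields they did not mean to touch. A hedged sketch; the GetProperties and SetHTTPHeaders methods on BlobURL are assumed from the public azblob surface and are not part of this excerpt:

	// Sketch: change only Cache-Control, preserving the other header values.
	func setCacheControlExample(ctx context.Context, blob BlobURL) error {
		props, err := blob.GetProperties(ctx, BlobAccessConditions{})
		if err != nil {
			return err
		}
		h := props.NewHTTPHeaders() // start from the blob's current headers
		h.CacheControl = "max-age=3600"
		_, err = blob.SetHTTPHeaders(ctx, h, BlobAccessConditions{})
		return err
	}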
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | // NewHTTPHeaders returns the user-modifiable properties for this blob.
 | ||||||
|  | func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { | ||||||
|  | 	return BlobHTTPHeaders{ | ||||||
|  | 		ContentType:        dr.ContentType(), | ||||||
|  | 		ContentEncoding:    dr.ContentEncoding(), | ||||||
|  | 		ContentLanguage:    dr.ContentLanguage(), | ||||||
|  | 		ContentDisposition: dr.ContentDisposition(), | ||||||
|  | 		CacheControl:       dr.CacheControl(), | ||||||
|  | 		ContentMD5:         dr.ContentMD5(), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ///////////////////////////////////////////////////////////////////////////////
 | ||||||
|  | 
 | ||||||
|  | // DownloadResponse wraps the AutoRest-generated downloadResponse and carries the info needed to retry a download.
 | ||||||
|  | type DownloadResponse struct { | ||||||
|  | 	r       *downloadResponse | ||||||
|  | 	ctx     context.Context | ||||||
|  | 	b       BlobURL | ||||||
|  | 	getInfo HTTPGetterInfo | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Body constructs a new RetryReader stream for reading data. If a connection fails
 | ||||||
|  | // while reading, it will make additional requests to reestablish a connection and
 | ||||||
|  | // continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
 | ||||||
|  | // (the default) returns the original response body, and no retries will be performed.
 | ||||||
|  | func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { | ||||||
|  | 	if o.MaxRetryRequests == 0 { // No additional retries
 | ||||||
|  | 		return r.Response().Body | ||||||
|  | 	} | ||||||
|  | 	return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, | ||||||
|  | 		func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { | ||||||
|  | 			resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, | ||||||
|  | 				BlobAccessConditions{ | ||||||
|  | 					ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, | ||||||
|  | 				}, | ||||||
|  | 				false) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return nil, err | ||||||
|  | 			} | ||||||
|  | 			return resp.Response(), err | ||||||
|  | 		}, | ||||||
|  | 	) | ||||||
|  | } | ||||||
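Per the comment above, MaxRetryRequests == 0 hands back the raw body, while any larger value wraps it in a RetryReader that re-issues ranged, ETag-pinned GETs through the closure. A short usage sketch:

	// Sketch: stream a download, transparently reconnecting up to three times
	// if the connection drops mid-body.
	func drainDownloadExample(dr *DownloadResponse, w io.Writer) error {
		body := dr.Body(RetryReaderOptions{MaxRetryRequests: 3})
		defer body.Close()
		_, err := io.Copy(w, body)
		return err
	}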
|  | 
 | ||||||
|  | // Response returns the raw HTTP response object.
 | ||||||
|  | func (r DownloadResponse) Response() *http.Response { | ||||||
|  | 	return r.r.Response() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewHTTPHeaders returns the user-modifiable properties for this blob.
 | ||||||
|  | func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders { | ||||||
|  | 	return r.r.NewHTTPHeaders() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobContentMD5 returns the value for header x-ms-blob-content-md5.
 | ||||||
|  | func (r DownloadResponse) BlobContentMD5() []byte { | ||||||
|  | 	return r.r.BlobContentMD5() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentMD5 returns the value for header Content-MD5.
 | ||||||
|  | func (r DownloadResponse) ContentMD5() []byte { | ||||||
|  | 	return r.r.ContentMD5() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StatusCode returns the HTTP status code of the response, e.g. 200.
 | ||||||
|  | func (r DownloadResponse) StatusCode() int { | ||||||
|  | 	return r.r.StatusCode() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Status returns the HTTP status message of the response, e.g. "200 OK".
 | ||||||
|  | func (r DownloadResponse) Status() string { | ||||||
|  | 	return r.r.Status() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AcceptRanges returns the value for header Accept-Ranges.
 | ||||||
|  | func (r DownloadResponse) AcceptRanges() string { | ||||||
|  | 	return r.r.AcceptRanges() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
 | ||||||
|  | func (r DownloadResponse) BlobCommittedBlockCount() int32 { | ||||||
|  | 	return r.r.BlobCommittedBlockCount() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
 | ||||||
|  | func (r DownloadResponse) BlobSequenceNumber() int64 { | ||||||
|  | 	return r.r.BlobSequenceNumber() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BlobType returns the value for header x-ms-blob-type.
 | ||||||
|  | func (r DownloadResponse) BlobType() BlobType { | ||||||
|  | 	return r.r.BlobType() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CacheControl returns the value for header Cache-Control.
 | ||||||
|  | func (r DownloadResponse) CacheControl() string { | ||||||
|  | 	return r.r.CacheControl() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentDisposition returns the value for header Content-Disposition.
 | ||||||
|  | func (r DownloadResponse) ContentDisposition() string { | ||||||
|  | 	return r.r.ContentDisposition() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentEncoding returns the value for header Content-Encoding.
 | ||||||
|  | func (r DownloadResponse) ContentEncoding() string { | ||||||
|  | 	return r.r.ContentEncoding() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentLanguage returns the value for header Content-Language.
 | ||||||
|  | func (r DownloadResponse) ContentLanguage() string { | ||||||
|  | 	return r.r.ContentLanguage() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentLength returns the value for header Content-Length.
 | ||||||
|  | func (r DownloadResponse) ContentLength() int64 { | ||||||
|  | 	return r.r.ContentLength() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentRange returns the value for header Content-Range.
 | ||||||
|  | func (r DownloadResponse) ContentRange() string { | ||||||
|  | 	return r.r.ContentRange() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ContentType returns the value for header Content-Type.
 | ||||||
|  | func (r DownloadResponse) ContentType() string { | ||||||
|  | 	return r.r.ContentType() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyCompletionTime returns the value for header x-ms-copy-completion-time.
 | ||||||
|  | func (r DownloadResponse) CopyCompletionTime() time.Time { | ||||||
|  | 	return r.r.CopyCompletionTime() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyID returns the value for header x-ms-copy-id.
 | ||||||
|  | func (r DownloadResponse) CopyID() string { | ||||||
|  | 	return r.r.CopyID() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyProgress returns the value for header x-ms-copy-progress.
 | ||||||
|  | func (r DownloadResponse) CopyProgress() string { | ||||||
|  | 	return r.r.CopyProgress() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopySource returns the value for header x-ms-copy-source.
 | ||||||
|  | func (r DownloadResponse) CopySource() string { | ||||||
|  | 	return r.r.CopySource() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyStatus returns the value for header x-ms-copy-status.
 | ||||||
|  | func (r DownloadResponse) CopyStatus() CopyStatusType { | ||||||
|  | 	return r.r.CopyStatus() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyStatusDescription returns the value for header x-ms-copy-status-description.
 | ||||||
|  | func (r DownloadResponse) CopyStatusDescription() string { | ||||||
|  | 	return r.r.CopyStatusDescription() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Date returns the value for header Date.
 | ||||||
|  | func (r DownloadResponse) Date() time.Time { | ||||||
|  | 	return r.r.Date() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ETag returns the value for header ETag.
 | ||||||
|  | func (r DownloadResponse) ETag() ETag { | ||||||
|  | 	return r.r.ETag() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // IsServerEncrypted returns the value for header x-ms-server-encrypted.
 | ||||||
|  | func (r DownloadResponse) IsServerEncrypted() string { | ||||||
|  | 	return r.r.IsServerEncrypted() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LastModified returns the value for header Last-Modified.
 | ||||||
|  | func (r DownloadResponse) LastModified() time.Time { | ||||||
|  | 	return r.r.LastModified() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LeaseDuration returns the value for header x-ms-lease-duration.
 | ||||||
|  | func (r DownloadResponse) LeaseDuration() LeaseDurationType { | ||||||
|  | 	return r.r.LeaseDuration() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LeaseState returns the value for header x-ms-lease-state.
 | ||||||
|  | func (r DownloadResponse) LeaseState() LeaseStateType { | ||||||
|  | 	return r.r.LeaseState() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // LeaseStatus returns the value for header x-ms-lease-status.
 | ||||||
|  | func (r DownloadResponse) LeaseStatus() LeaseStatusType { | ||||||
|  | 	return r.r.LeaseStatus() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RequestID returns the value for header x-ms-request-id.
 | ||||||
|  | func (r DownloadResponse) RequestID() string { | ||||||
|  | 	return r.r.RequestID() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Version returns the value for header x-ms-version.
 | ||||||
|  | func (r DownloadResponse) Version() string { | ||||||
|  | 	return r.r.Version() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewMetadata returns user-defined key/value pairs.
 | ||||||
|  | func (r DownloadResponse) NewMetadata() Metadata { | ||||||
|  | 	return r.r.NewMetadata() | ||||||
|  | } | ||||||
|  | @ -0,0 +1,11 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // GetProxyFunc is a forwarder for the OS-exclusive proxyMiddleman_os.go files
 | ||||||
|  | func GetProxyFunc() func(*http.Request) (*url.URL, error) { | ||||||
|  | 	return proxyMiddleman() | ||||||
|  | } | ||||||
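This function is shaped to slot straight into http.Transport.Proxy, which is the intended way to consume the package:

	package main

	import (
		"net/http"

		"github.com/mattn/go-ieproxy"
	)

	func main() {
		// Route requests through the system (IE) proxy settings; on
		// non-Windows platforms this falls through to ProxyFromEnvironment.
		client := &http.Client{
			Transport: &http.Transport{Proxy: ieproxy.GetProxyFunc()},
		}
		_, _ = client.Get("https://example.com") // illustrative target
	}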
|  | @ -0,0 +1,23 @@ | ||||||
|  | MIT License | ||||||
|  | 
 | ||||||
|  | Copyright (c) 2014 mattn | ||||||
|  | Copyright (c) 2017 oliverpool | ||||||
|  | Copyright (c) 2019 Adele Reed | ||||||
|  | 
 | ||||||
|  | Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  | of this software and associated documentation files (the "Software"), to deal | ||||||
|  | in the Software without restriction, including without limitation the rights | ||||||
|  | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  | copies of the Software, and to permit persons to whom the Software is | ||||||
|  | furnished to do so, subject to the following conditions: | ||||||
|  | 
 | ||||||
|  | The above copyright notice and this permission notice shall be included in all | ||||||
|  | copies or substantial portions of the Software. | ||||||
|  | 
 | ||||||
|  | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||||
|  | SOFTWARE. | ||||||
|  | @ -0,0 +1,51 @@ | ||||||
|  | // Package ieproxy is a utility to retrieve the proxy parameters (especially those of Internet Explorer on Windows).
 | ||||||
|  | //
 | ||||||
|  | // On Windows, it gathers the parameters from the registry (regedit), while it uses environment variables on other platforms.
 | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import "os" | ||||||
|  | 
 | ||||||
|  | // ProxyConf gathers the proxy configuration.
 | ||||||
|  | type ProxyConf struct { | ||||||
|  | 	Static    StaticProxyConf // static configuration
 | ||||||
|  | 	Automatic ProxyScriptConf // script configuration
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // StaticProxyConf contains the configuration for a static proxy.
 | ||||||
|  | type StaticProxyConf struct { | ||||||
|  | 	// Is the proxy active?
 | ||||||
|  | 	Active bool | ||||||
|  | 	// Proxy address for each scheme (http, https)
 | ||||||
|  | 	// "" (empty string) is the fallback proxy
 | ||||||
|  | 	Protocols map[string]string | ||||||
|  | 	// Addresses not to be browsed via the proxy (comma-separated, linux-like)
 | ||||||
|  | 	NoProxy string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ProxyScriptConf contains the configuration for an automatic proxy.
 | ||||||
|  | type ProxyScriptConf struct { | ||||||
|  | 	// Is the proxy active?
 | ||||||
|  | 	Active bool | ||||||
|  | 	// PreConfiguredURL of the .pac file.
 | ||||||
|  | 	// If this is empty and Active is true, auto-configuration should be assumed.
 | ||||||
|  | 	PreConfiguredURL string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetConf retrieves the proxy configuration from the Windows registry.
 | ||||||
|  | func GetConf() ProxyConf { | ||||||
|  | 	return getConf() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // OverrideEnvWithStaticProxy writes new values to the
 | ||||||
|  | // `http_proxy`, `https_proxy` and `no_proxy` environment variables.
 | ||||||
|  | // The values are taken from the Windows registry (this should be called from an `init()` function; see example).
 | ||||||
|  | func OverrideEnvWithStaticProxy() { | ||||||
|  | 	overrideEnvWithStaticProxy(GetConf(), os.Setenv) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FindProxyForURL computes the proxy for a given URL according to the pac file
 | ||||||
|  | func (psc *ProxyScriptConf) FindProxyForURL(URL string) string { | ||||||
|  | 	return psc.findProxyForURL(URL) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type envSetter func(string, string) error | ||||||
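As the comment suggests, OverrideEnvWithStaticProxy is meant to run from an init() function so that code which only honors the conventional environment variables still picks up the registry settings. A minimal sketch:

	package main

	import (
		"fmt"
		"os"

		"github.com/mattn/go-ieproxy"
	)

	func init() {
		// Copy the Windows registry proxy settings into http_proxy,
		// https_proxy and no_proxy before anything reads them; off
		// Windows this is a no-op.
		ieproxy.OverrideEnvWithStaticProxy()
	}

	func main() {
		fmt.Println("http_proxy =", os.Getenv("http_proxy"))
	}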
|  | @ -0,0 +1,10 @@ | ||||||
|  | // +build !windows
 | ||||||
|  | 
 | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | func getConf() ProxyConf { | ||||||
|  | 	return ProxyConf{} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { | ||||||
|  | } | ||||||
|  | @ -0,0 +1,164 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"strings" | ||||||
|  | 	"sync" | ||||||
|  | 	"unsafe" | ||||||
|  | 
 | ||||||
|  | 	"golang.org/x/sys/windows/registry" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type regeditValues struct { | ||||||
|  | 	ProxyServer   string | ||||||
|  | 	ProxyOverride string | ||||||
|  | 	ProxyEnable   uint64 | ||||||
|  | 	AutoConfigURL string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | var once sync.Once | ||||||
|  | var windowsProxyConf ProxyConf | ||||||
|  | 
 | ||||||
|  | // getConf retrieves the proxy configuration from the Windows registry.
 | ||||||
|  | func getConf() ProxyConf { | ||||||
|  | 	once.Do(writeConf) | ||||||
|  | 	return windowsProxyConf | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func writeConf() { | ||||||
|  | 	var ( | ||||||
|  | 		cfg *tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG | ||||||
|  | 		err error | ||||||
|  | 	) | ||||||
|  | 
 | ||||||
|  | 	if cfg, err = getUserConfigFromWindowsSyscall(); err != nil { | ||||||
|  | 		regedit, _ := readRegedit() // If the syscall fails, fall back to manual detection.
 | ||||||
|  | 		windowsProxyConf = parseRegedit(regedit) | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	defer globalFreeWrapper(cfg.lpszProxy) | ||||||
|  | 	defer globalFreeWrapper(cfg.lpszProxyBypass) | ||||||
|  | 	defer globalFreeWrapper(cfg.lpszAutoConfigUrl) | ||||||
|  | 
 | ||||||
|  | 	windowsProxyConf = ProxyConf{ | ||||||
|  | 		Static: StaticProxyConf{ | ||||||
|  | 			Active: cfg.lpszProxy != nil, | ||||||
|  | 		}, | ||||||
|  | 		Automatic: ProxyScriptConf{ | ||||||
|  | 			Active: cfg.lpszAutoConfigUrl != nil || cfg.fAutoDetect, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if windowsProxyConf.Static.Active { | ||||||
|  | 		protocol := make(map[string]string) | ||||||
|  | 		for _, s := range strings.Split(StringFromUTF16Ptr(cfg.lpszProxy), ";") { | ||||||
|  | 			s = strings.TrimSpace(s) | ||||||
|  | 			if s == "" { | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			pair := strings.SplitN(s, "=", 2) | ||||||
|  | 			if len(pair) > 1 { | ||||||
|  | 				protocol[pair[0]] = pair[1] | ||||||
|  | 			} else { | ||||||
|  | 				protocol[""] = pair[0] | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		windowsProxyConf.Static.Protocols = protocol | ||||||
|  | 		if cfg.lpszProxyBypass != nil { | ||||||
|  | 			windowsProxyConf.Static.NoProxy = strings.Replace(StringFromUTF16Ptr(cfg.lpszProxyBypass), ";", ",", -1) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if windowsProxyConf.Automatic.Active { | ||||||
|  | 		windowsProxyConf.Automatic.PreConfiguredURL = StringFromUTF16Ptr(cfg.lpszAutoConfigUrl) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) { | ||||||
|  | 	handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) | ||||||
|  | 	if handle == 0 { | ||||||
|  | 		return &tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG{}, err | ||||||
|  | 	} | ||||||
|  | 	defer winHttpCloseHandle.Call(handle) | ||||||
|  | 
 | ||||||
|  | 	config := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG) | ||||||
|  | 
 | ||||||
|  | 	ret, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(config))) | ||||||
|  | 	if ret > 0 { | ||||||
|  | 		err = nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return config, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // overrideEnvWithStaticProxy writes new values to the
 | ||||||
|  | // http_proxy, https_proxy and no_proxy environment variables.
 | ||||||
|  | // The values are taken from the Windows registry (this should be called from an init() function).
 | ||||||
|  | func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { | ||||||
|  | 	if conf.Static.Active { | ||||||
|  | 		for _, scheme := range []string{"http", "https"} { | ||||||
|  | 			url := mapFallback(scheme, "", conf.Static.Protocols) | ||||||
|  | 			setenv(scheme+"_proxy", url) | ||||||
|  | 		} | ||||||
|  | 		if conf.Static.NoProxy != "" { | ||||||
|  | 			setenv("no_proxy", conf.Static.NoProxy) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func parseRegedit(regedit regeditValues) ProxyConf { | ||||||
|  | 	protocol := make(map[string]string) | ||||||
|  | 	for _, s := range strings.Split(regedit.ProxyServer, ";") { | ||||||
|  | 		if s == "" { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		pair := strings.SplitN(s, "=", 2) | ||||||
|  | 		if len(pair) > 1 { | ||||||
|  | 			protocol[pair[0]] = pair[1] | ||||||
|  | 		} else { | ||||||
|  | 			protocol[""] = pair[0] | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return ProxyConf{ | ||||||
|  | 		Static: StaticProxyConf{ | ||||||
|  | 			Active:    regedit.ProxyEnable > 0, | ||||||
|  | 			Protocols: protocol, | ||||||
|  | 			NoProxy:   strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match linux style
 | ||||||
|  | 		}, | ||||||
|  | 		Automatic: ProxyScriptConf{ | ||||||
|  | 			Active:           regedit.AutoConfigURL != "", | ||||||
|  | 			PreConfiguredURL: regedit.AutoConfigURL, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
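parseRegedit accepts both registry shapes for ProxyServer: a per-scheme list and a bare fallback address, which lands under the empty-string key. A within-package sketch:

	// "http=127.0.0.1:8080;https=127.0.0.1:8443" -> one entry per scheme
	// "127.0.0.1:8080"                           -> fallback under the "" key
	func parseRegeditExample() ProxyConf {
		return parseRegedit(regeditValues{
			ProxyEnable:   1, // non-zero means the static proxy is active
			ProxyServer:   "http=127.0.0.1:8080;https=127.0.0.1:8443",
			ProxyOverride: "localhost;*.internal", // becomes "localhost,*.internal"
		})
	}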
|  | 
 | ||||||
|  | func readRegedit() (values regeditValues, err error) { | ||||||
|  | 	k, err := registry.OpenKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	defer k.Close() | ||||||
|  | 
 | ||||||
|  | 	values.ProxyServer, _, err = k.GetStringValue("ProxyServer") | ||||||
|  | 	if err != nil && err != registry.ErrNotExist { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride") | ||||||
|  | 	if err != nil && err != registry.ErrNotExist { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable") | ||||||
|  | 	if err != nil && err != registry.ErrNotExist { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL") | ||||||
|  | 	if err != nil && err != registry.ErrNotExist { | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	err = nil | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | @ -0,0 +1,15 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"golang.org/x/sys/windows" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var kernel32 = windows.NewLazySystemDLL("kernel32.dll") | ||||||
|  | var globalFree = kernel32.NewProc("GlobalFree") | ||||||
|  | 
 | ||||||
|  | func globalFreeWrapper(ptr *uint16) { | ||||||
|  | 	if ptr != nil { | ||||||
|  | 		_, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr))) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | @ -0,0 +1,7 @@ | ||||||
|  | // +build !windows
 | ||||||
|  | 
 | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | func (psc *ProxyScriptConf) findProxyForURL(URL string) string { | ||||||
|  | 	return "" | ||||||
|  | } | ||||||
|  | @ -0,0 +1,72 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"strings" | ||||||
|  | 	"syscall" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func (psc *ProxyScriptConf) findProxyForURL(URL string) string { | ||||||
|  | 	if !psc.Active { | ||||||
|  | 		return "" | ||||||
|  | 	} | ||||||
|  | 	proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL) | ||||||
|  | 	i := strings.Index(proxy, ";") | ||||||
|  | 	if i >= 0 { | ||||||
|  | 		return proxy[:i] | ||||||
|  | 	} | ||||||
|  | 	return proxy | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getProxyForURL(pacfileURL, URL string) (string, error) { | ||||||
|  | 	pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 	URLPtr, err := syscall.UTF16PtrFromString(URL) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) | ||||||
|  | 	if handle == 0 { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 	defer winHttpCloseHandle.Call(handle) | ||||||
|  | 
 | ||||||
|  | 	dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL | ||||||
|  | 	dwAutoDetectFlags := autoDetectFlag(0) | ||||||
|  | 	pfURLptr := pacfileURLPtr | ||||||
|  | 
 | ||||||
|  | 	if pacfileURL == "" { | ||||||
|  | 		dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT | ||||||
|  | 		dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP | ||||||
|  | 		pfURLptr = nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	options := tWINHTTP_AUTOPROXY_OPTIONS{ | ||||||
|  | 		dwFlags:                dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6 | ||||||
|  | 		dwAutoDetectFlags:      dwAutoDetectFlags, | ||||||
|  | 		lpszAutoConfigUrl:      pfURLptr, | ||||||
|  | 		lpvReserved:            nil, | ||||||
|  | 		dwReserved:             0, | ||||||
|  | 		fAutoLogonIfChallenged: true, // may not be optimal; see https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx | ||||||
|  | 	} | ||||||
|  | 	// lpszProxyBypass is left unset: this code only runs when a PAC file may exist (autodetect can fail), and in that case a bypass list could not be returned anyway. | ||||||
|  | 	// If autodetect fails and no pre-specified PAC file is present, no proxy is returned. | ||||||
|  | 
 | ||||||
|  | 	info := new(tWINHTTP_PROXY_INFO) | ||||||
|  | 
 | ||||||
|  | 	ret, _, err := winHttpGetProxyForURL.Call( | ||||||
|  | 		handle, | ||||||
|  | 		uintptr(unsafe.Pointer(URLPtr)), | ||||||
|  | 		uintptr(unsafe.Pointer(&options)), | ||||||
|  | 		uintptr(unsafe.Pointer(info)), | ||||||
|  | 	) | ||||||
|  | 	if ret > 0 { | ||||||
|  | 		err = nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	defer globalFreeWrapper(info.lpszProxyBypass) | ||||||
|  | 	defer globalFreeWrapper(info.lpszProxy) | ||||||
|  | 	return StringFromUTF16Ptr(info.lpszProxy), err | ||||||
|  | } | ||||||
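A short usage sketch of the PAC path above, relying only on identifiers that appear elsewhere in this commit (GetConf and the exported FindProxyForURL wrapper are both used by the middleman code below); the URL is illustrative.

	// Resolve the proxy for one URL through the configured PAC script.
	conf := ieproxy.GetConf()
	if conf.Automatic.Active {
		// findProxyForURL keeps only the first entry of a multi-proxy
		// PAC result, so this is either a single "host:port" or "".
		host := conf.Automatic.FindProxyForURL("http://example.com/")
		fmt.Println("proxy:", host)
	}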
|  | @ -0,0 +1,13 @@ | ||||||
|  | // +build !windows
 | ||||||
|  | 
 | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { | ||||||
|  | 	// Fall through to ProxyFromEnvironment on all non-Windows OSes. | ||||||
|  | 	return http.ProxyFromEnvironment | ||||||
|  | } | ||||||
|  | @ -0,0 +1,52 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"net/http" | ||||||
|  | 	"net/url" | ||||||
|  | 
 | ||||||
|  | 	"golang.org/x/net/http/httpproxy" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { | ||||||
|  | 	// Get the proxy configuration
 | ||||||
|  | 	conf := GetConf() | ||||||
|  | 	envcfg := httpproxy.FromEnvironment() | ||||||
|  | 
 | ||||||
|  | 	if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" { | ||||||
|  | 		// If the user manually specifies environment variables, prefer those over the Windows config.
 | ||||||
|  | 		return http.ProxyFromEnvironment | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return func(req *http.Request) (i *url.URL, e error) { | ||||||
|  | 		if conf.Automatic.Active { | ||||||
|  | 			host := conf.Automatic.FindProxyForURL(req.URL.String()) | ||||||
|  | 			if host != "" { | ||||||
|  | 				return &url.URL{Host: host}, nil | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		if conf.Static.Active { | ||||||
|  | 			return staticProxy(conf, req) | ||||||
|  | 		} | ||||||
|  | 		// Neither automatic nor static configuration is active, so this should return no proxy; fall through to ProxyFromEnvironment. | ||||||
|  | 		return http.ProxyFromEnvironment(req) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { | ||||||
|  | 	// Static proxy configuration is active: translate it into an httpproxy.Config and delegate. | ||||||
|  | 	prox := httpproxy.Config{ | ||||||
|  | 		HTTPSProxy: mapFallback("https", "", conf.Static.Protocols), | ||||||
|  | 		HTTPProxy:  mapFallback("http", "", conf.Static.Protocols), | ||||||
|  | 		NoProxy:    conf.Static.NoProxy, | ||||||
|  | 	} | ||||||
|  | 	return prox.ProxyFunc()(req.URL) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // mapFallback returns the value for oKey, or the value for fbKey when oKey is absent from the map. | ||||||
|  | func mapFallback(oKey, fbKey string, m map[string]string) string { | ||||||
|  | 	if v, ok := m[oKey]; ok { | ||||||
|  | 		return v | ||||||
|  | 	} | ||||||
|  | 	return m[fbKey] | ||||||
|  | } | ||||||
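For orientation, this is how an application would typically consume the middleman: proxyMiddleman is unexported, so callers are assumed to go through the package's exported wrapper (in go-ieproxy that entry point is GetProxyFunc, which is not part of this excerpt). A sketch under that assumption:

	package main

	import (
		"net/http"

		"github.com/mattn/go-ieproxy"
	)

	func main() {
		// GetProxyFunc is assumed to wrap proxyMiddleman; on Windows it applies
		// the IE settings above, elsewhere it falls back to ProxyFromEnvironment.
		client := &http.Client{
			Transport: &http.Transport{Proxy: ieproxy.GetProxyFunc()},
		}
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
	}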
|  | @ -0,0 +1,23 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"unicode/utf16" | ||||||
|  | 	"unsafe" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // StringFromUTF16Ptr converts a NUL-terminated *uint16 (UTF-16) C string to a Go string. | ||||||
|  | func StringFromUTF16Ptr(s *uint16) string { | ||||||
|  | 	if s == nil { | ||||||
|  | 		return "" | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s)) | ||||||
|  | 
 | ||||||
|  | 	// find the string length
 | ||||||
|  | 	sz := 0 | ||||||
|  | 	for p[sz] != 0 { | ||||||
|  | 		sz++ | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return string(utf16.Decode(p[:sz:sz])) | ||||||
|  | } | ||||||
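A round-trip sketch for this helper: syscall.UTF16PtrFromString is the standard-library encoder, and the string literal is arbitrary.

	// Encode a Go string as a NUL-terminated UTF-16 pointer, then decode it back.
	ptr, err := syscall.UTF16PtrFromString("PROXY 10.0.0.1:8080")
	if err != nil {
		panic(err) // only fails if the string contains a NUL byte
	}
	fmt.Println(StringFromUTF16Ptr(ptr)) // PROXY 10.0.0.1:8080
	fmt.Println(StringFromUTF16Ptr(nil)) // "" (nil is handled explicitly)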
|  | @ -0,0 +1,50 @@ | ||||||
|  | package ieproxy | ||||||
|  | 
 | ||||||
|  | import "golang.org/x/sys/windows" | ||||||
|  | 
 | ||||||
|  | var winHttp = windows.NewLazySystemDLL("winhttp.dll") | ||||||
|  | var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl") | ||||||
|  | var winHttpOpen = winHttp.NewProc("WinHttpOpen") | ||||||
|  | var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle") | ||||||
|  | var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser") | ||||||
|  | 
 | ||||||
|  | type tWINHTTP_AUTOPROXY_OPTIONS struct { | ||||||
|  | 	dwFlags                autoProxyFlag | ||||||
|  | 	dwAutoDetectFlags      autoDetectFlag | ||||||
|  | 	lpszAutoConfigUrl      *uint16 | ||||||
|  | 	lpvReserved            *uint16 | ||||||
|  | 	dwReserved             uint32 | ||||||
|  | 	fAutoLogonIfChallenged bool | ||||||
|  | } | ||||||
|  | type autoProxyFlag uint32 | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	fWINHTTP_AUTOPROXY_AUTO_DETECT         = autoProxyFlag(0x00000001) | ||||||
|  | 	fWINHTTP_AUTOPROXY_CONFIG_URL          = autoProxyFlag(0x00000002) | ||||||
|  | 	fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT     = autoProxyFlag(0x00080000) | ||||||
|  | 	fWINHTTP_AUTOPROXY_NO_CACHE_SVC        = autoProxyFlag(0x00100000) | ||||||
|  | 	fWINHTTP_AUTOPROXY_NO_DIRECTACCESS     = autoProxyFlag(0x00040000) | ||||||
|  | 	fWINHTTP_AUTOPROXY_RUN_INPROCESS       = autoProxyFlag(0x00010000) | ||||||
|  | 	fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000) | ||||||
|  | 	fWINHTTP_AUTOPROXY_SORT_RESULTS        = autoProxyFlag(0x00400000) | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type autoDetectFlag uint32 | ||||||
|  | 
 | ||||||
|  | const ( | ||||||
|  | 	fWINHTTP_AUTO_DETECT_TYPE_DHCP  = autoDetectFlag(0x00000001) | ||||||
|  | 	fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002) | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | type tWINHTTP_PROXY_INFO struct { | ||||||
|  | 	dwAccessType    uint32 | ||||||
|  | 	lpszProxy       *uint16 | ||||||
|  | 	lpszProxyBypass *uint16 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct { | ||||||
|  | 	fAutoDetect       bool | ||||||
|  | 	lpszAutoConfigUrl *uint16 | ||||||
|  | 	lpszProxy         *uint16 | ||||||
|  | 	lpszProxyBypass   *uint16 | ||||||
|  | } | ||||||
|  | @ -0,0 +1,370 @@ | ||||||
|  | // Copyright 2017 The Go Authors. All rights reserved.
 | ||||||
|  | // Use of this source code is governed by a BSD-style
 | ||||||
|  | // license that can be found in the LICENSE file.
 | ||||||
|  | 
 | ||||||
|  | // Package httpproxy provides support for HTTP proxy determination
 | ||||||
|  | // based on environment variables, as provided by net/http's
 | ||||||
|  | // ProxyFromEnvironment function.
 | ||||||
|  | //
 | ||||||
|  | // The API is not subject to the Go 1 compatibility promise and may change at
 | ||||||
|  | // any time.
 | ||||||
|  | package httpproxy | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"net" | ||||||
|  | 	"net/url" | ||||||
|  | 	"os" | ||||||
|  | 	"strings" | ||||||
|  | 	"unicode/utf8" | ||||||
|  | 
 | ||||||
|  | 	"golang.org/x/net/idna" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Config holds configuration for HTTP proxy settings. See
 | ||||||
|  | // FromEnvironment for details.
 | ||||||
|  | type Config struct { | ||||||
|  | 	// HTTPProxy represents the value of the HTTP_PROXY or
 | ||||||
|  | 	// http_proxy environment variable. It will be used as the proxy
 | ||||||
|  | 	// URL for HTTP requests and HTTPS requests unless overridden by
 | ||||||
|  | 	// HTTPSProxy or NoProxy.
 | ||||||
|  | 	HTTPProxy string | ||||||
|  | 
 | ||||||
|  | 	// HTTPSProxy represents the HTTPS_PROXY or https_proxy
 | ||||||
|  | 	// environment variable. It will be used as the proxy URL for
 | ||||||
|  | 	// HTTPS requests unless overridden by NoProxy.
 | ||||||
|  | 	HTTPSProxy string | ||||||
|  | 
 | ||||||
|  | 	// NoProxy represents the NO_PROXY or no_proxy environment
 | ||||||
|  | 	// variable. It specifies a string that contains comma-separated values
 | ||||||
|  | 	// specifying hosts that should be excluded from proxying. Each value is
 | ||||||
|  | 	// represented by an IP address prefix (1.2.3.4), an IP address prefix in
 | ||||||
|  | 	// CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*).
 | ||||||
|  | 	// An IP address prefix and domain name can also include a literal port
 | ||||||
|  | 	// number (1.2.3.4:80).
 | ||||||
|  | 	// A domain name matches that name and all subdomains. A domain name with
 | ||||||
|  | 	// a leading "." matches subdomains only. For example "foo.com" matches
 | ||||||
|  | 	// "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com".
 | ||||||
|  | 	// A single asterisk (*) indicates that no proxying should be done.
 | ||||||
|  | 	// A best effort is made to parse the string and errors are
 | ||||||
|  | 	// ignored.
 | ||||||
|  | 	NoProxy string | ||||||
|  | 
 | ||||||
|  | 	// CGI holds whether the current process is running
 | ||||||
|  | 	// as a CGI handler (FromEnvironment infers this from the
 | ||||||
|  | 	// presence of a REQUEST_METHOD environment variable).
 | ||||||
|  | 	// When this is set, ProxyForURL will return an error
 | ||||||
|  | 	// when HTTPProxy applies, because a client could be
 | ||||||
|  | 	// setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy.
 | ||||||
|  | 	CGI bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // config holds the parsed configuration for HTTP proxy settings.
 | ||||||
|  | type config struct { | ||||||
|  | 	// Config represents the original configuration as defined above.
 | ||||||
|  | 	Config | ||||||
|  | 
 | ||||||
|  | 	// httpsProxy is the parsed URL of the HTTPSProxy if defined.
 | ||||||
|  | 	httpsProxy *url.URL | ||||||
|  | 
 | ||||||
|  | 	// httpProxy is the parsed URL of the HTTPProxy if defined.
 | ||||||
|  | 	httpProxy *url.URL | ||||||
|  | 
 | ||||||
|  | 	// ipMatchers represent all values in the NoProxy that are IP address
 | ||||||
|  | 	// prefixes or an IP address in CIDR notation.
 | ||||||
|  | 	ipMatchers []matcher | ||||||
|  | 
 | ||||||
|  | 	// domainMatchers represent all values in the NoProxy that are a domain
 | ||||||
|  | 	// name or hostname & domain name
 | ||||||
|  | 	domainMatchers []matcher | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FromEnvironment returns a Config instance populated from the
 | ||||||
|  | // environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the
 | ||||||
|  | // lowercase versions thereof). HTTPS_PROXY takes precedence over
 | ||||||
|  | // HTTP_PROXY for https requests.
 | ||||||
|  | //
 | ||||||
|  | // The environment values may be either a complete URL or a
 | ||||||
|  | // "host[:port]", in which case the "http" scheme is assumed. An error
 | ||||||
|  | // is returned if the value is a different form.
 | ||||||
|  | func FromEnvironment() *Config { | ||||||
|  | 	return &Config{ | ||||||
|  | 		HTTPProxy:  getEnvAny("HTTP_PROXY", "http_proxy"), | ||||||
|  | 		HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), | ||||||
|  | 		NoProxy:    getEnvAny("NO_PROXY", "no_proxy"), | ||||||
|  | 		CGI:        os.Getenv("REQUEST_METHOD") != "", | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getEnvAny(names ...string) string { | ||||||
|  | 	for _, n := range names { | ||||||
|  | 		if val := os.Getenv(n); val != "" { | ||||||
|  | 			return val | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return "" | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ProxyFunc returns a function that determines the proxy URL to use for
 | ||||||
|  | // a given request URL. Changing the contents of cfg will not affect
 | ||||||
|  | // proxy functions created earlier.
 | ||||||
|  | //
 | ||||||
|  | // A nil URL and nil error are returned if no proxy is defined in the
 | ||||||
|  | // environment, or a proxy should not be used for the given request, as
 | ||||||
|  | // defined by NO_PROXY.
 | ||||||
|  | //
 | ||||||
|  | // As a special case, if req.URL.Host is "localhost" (with or without a
 | ||||||
|  | // port number), then a nil URL and nil error will be returned.
 | ||||||
|  | func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { | ||||||
|  | 	// Preprocess the Config settings for more efficient evaluation.
 | ||||||
|  | 	cfg1 := &config{ | ||||||
|  | 		Config: *cfg, | ||||||
|  | 	} | ||||||
|  | 	cfg1.init() | ||||||
|  | 	return cfg1.proxyForURL | ||||||
|  | } | ||||||
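A self-contained sketch of the Config-to-ProxyFunc flow documented above (the proxy addresses and host names are made up):

	package main

	import (
		"fmt"
		"net/url"

		"golang.org/x/net/http/httpproxy"
	)

	func main() {
		cfg := &httpproxy.Config{
			HTTPProxy:  "http://proxy.corp:3128",
			HTTPSProxy: "http://proxy.corp:3129",
			NoProxy:    "localhost,.internal,10.0.0.0/8",
		}
		proxyFn := cfg.ProxyFunc()

		u, _ := url.Parse("https://service.internal/api")
		p, _ := proxyFn(u)
		fmt.Println(p) // <nil>: ".internal" suppresses the proxy

		u, _ = url.Parse("https://example.com/")
		p, _ = proxyFn(u)
		fmt.Println(p) // http://proxy.corp:3129
	}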
|  | 
 | ||||||
|  | func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { | ||||||
|  | 	var proxy *url.URL | ||||||
|  | 	if reqURL.Scheme == "https" { | ||||||
|  | 		proxy = cfg.httpsProxy | ||||||
|  | 	} | ||||||
|  | 	if proxy == nil { | ||||||
|  | 		proxy = cfg.httpProxy | ||||||
|  | 		if proxy != nil && cfg.CGI { | ||||||
|  | 			return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if proxy == nil { | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 	if !cfg.useProxy(canonicalAddr(reqURL)) { | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return proxy, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func parseProxy(proxy string) (*url.URL, error) { | ||||||
|  | 	if proxy == "" { | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	proxyURL, err := url.Parse(proxy) | ||||||
|  | 	if err != nil || | ||||||
|  | 		(proxyURL.Scheme != "http" && | ||||||
|  | 			proxyURL.Scheme != "https" && | ||||||
|  | 			proxyURL.Scheme != "socks5") { | ||||||
|  | 		// proxy was bogus. Try prepending "http://" to it and
 | ||||||
|  | 		// see if that parses correctly. If not, we fall
 | ||||||
|  | 		// through and complain about the original one.
 | ||||||
|  | 		if proxyURL, err := url.Parse("http://" + proxy); err == nil { | ||||||
|  | 			return proxyURL, nil | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) | ||||||
|  | 	} | ||||||
|  | 	return proxyURL, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // useProxy reports whether requests to addr should use a proxy,
 | ||||||
|  | // according to the NO_PROXY or no_proxy environment variable.
 | ||||||
|  | // addr is always a canonicalAddr with a host and port.
 | ||||||
|  | func (cfg *config) useProxy(addr string) bool { | ||||||
|  | 	if len(addr) == 0 { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	host, port, err := net.SplitHostPort(addr) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	if host == "localhost" { | ||||||
|  | 		return false | ||||||
|  | 	} | ||||||
|  | 	ip := net.ParseIP(host) | ||||||
|  | 	if ip != nil { | ||||||
|  | 		if ip.IsLoopback() { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	addr = strings.ToLower(strings.TrimSpace(host)) | ||||||
|  | 
 | ||||||
|  | 	if ip != nil { | ||||||
|  | 		for _, m := range cfg.ipMatchers { | ||||||
|  | 			if m.match(addr, port, ip) { | ||||||
|  | 				return false | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	for _, m := range cfg.domainMatchers { | ||||||
|  | 		if m.match(addr, port, ip) { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (c *config) init() { | ||||||
|  | 	if parsed, err := parseProxy(c.HTTPProxy); err == nil { | ||||||
|  | 		c.httpProxy = parsed | ||||||
|  | 	} | ||||||
|  | 	if parsed, err := parseProxy(c.HTTPSProxy); err == nil { | ||||||
|  | 		c.httpsProxy = parsed | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, p := range strings.Split(c.NoProxy, ",") { | ||||||
|  | 		p = strings.ToLower(strings.TrimSpace(p)) | ||||||
|  | 		if len(p) == 0 { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if p == "*" { | ||||||
|  | 			c.ipMatchers = []matcher{allMatch{}} | ||||||
|  | 			c.domainMatchers = []matcher{allMatch{}} | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// IPv4/CIDR, IPv6/CIDR
 | ||||||
|  | 		if _, pnet, err := net.ParseCIDR(p); err == nil { | ||||||
|  | 			c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// IPv4:port, [IPv6]:port
 | ||||||
|  | 		phost, pport, err := net.SplitHostPort(p) | ||||||
|  | 		if err == nil { | ||||||
|  | 			if len(phost) == 0 { | ||||||
|  | 				// There is no host part, likely the entry is malformed; ignore.
 | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			if phost[0] == '[' && phost[len(phost)-1] == ']' { | ||||||
|  | 				phost = phost[1 : len(phost)-1] | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			phost = p | ||||||
|  | 		} | ||||||
|  | 		// IPv4, IPv6
 | ||||||
|  | 		if pip := net.ParseIP(phost); pip != nil { | ||||||
|  | 			c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if len(phost) == 0 { | ||||||
|  | 			// There is no host part, likely the entry is malformed; ignore.
 | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// domain.com or domain.com:80
 | ||||||
|  | 		// foo.com matches bar.foo.com
 | ||||||
|  | 		// .domain.com or .domain.com:port
 | ||||||
|  | 		// *.domain.com or *.domain.com:port
 | ||||||
|  | 		if strings.HasPrefix(phost, "*.") { | ||||||
|  | 			phost = phost[1:] | ||||||
|  | 		} | ||||||
|  | 		matchHost := false | ||||||
|  | 		if phost[0] != '.' { | ||||||
|  | 			matchHost = true | ||||||
|  | 			phost = "." + phost | ||||||
|  | 		} | ||||||
|  | 		c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) | ||||||
|  | 	} | ||||||
|  | } | ||||||
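To make the branching above concrete, an illustrative classification of NO_PROXY entries (a trace of what init does, not code from this diff):

	// "*"          -> allMatch in both lists: proxy nothing at all
	// "10.0.0.0/8" -> cidrMatch (net.ParseCIDR succeeds)
	// "1.2.3.4:80" -> ipMatch with port "80" (SplitHostPort, then ParseIP)
	// "foo.com"    -> domainMatch{host: ".foo.com", matchHost: true}: matches foo.com and its subdomains
	// ".foo.com"   -> domainMatch{host: ".foo.com", matchHost: false}: subdomains only
	// "*.foo.com"  -> leading "*" stripped, then handled like ".foo.com"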
|  | 
 | ||||||
|  | var portMap = map[string]string{ | ||||||
|  | 	"http":   "80", | ||||||
|  | 	"https":  "443", | ||||||
|  | 	"socks5": "1080", | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // canonicalAddr returns url.Host but always with a ":port" suffix
 | ||||||
|  | func canonicalAddr(url *url.URL) string { | ||||||
|  | 	addr := url.Hostname() | ||||||
|  | 	if v, err := idnaASCII(addr); err == nil { | ||||||
|  | 		addr = v | ||||||
|  | 	} | ||||||
|  | 	port := url.Port() | ||||||
|  | 	if port == "" { | ||||||
|  | 		port = portMap[url.Scheme] | ||||||
|  | 	} | ||||||
|  | 	return net.JoinHostPort(addr, port) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Given a string of the form "host", "host:port", or "[ipv6::address]:port",
 | ||||||
|  | // return true if the string includes a port.
 | ||||||
|  | func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } | ||||||
|  | 
 | ||||||
|  | func idnaASCII(v string) (string, error) { | ||||||
|  | 	// TODO: Consider removing this check after verifying performance is okay.
 | ||||||
|  | 	// Right now punycode verification, length checks, context checks, and the
 | ||||||
|  | 	// permissible character tests are all omitted. It also prevents the ToASCII
 | ||||||
|  | 	// call from salvaging an invalid IDN, when possible. As a result it may be
 | ||||||
|  | 	// possible to have two IDNs that appear identical to the user where the
 | ||||||
|  | 	// ASCII-only version causes an error downstream whereas the non-ASCII
 | ||||||
|  | 	// version does not.
 | ||||||
|  | 	// Note that for correct ASCII IDNs ToASCII will only do considerably more
 | ||||||
|  | 	// work, but it will not cause an allocation.
 | ||||||
|  | 	if isASCII(v) { | ||||||
|  | 		return v, nil | ||||||
|  | 	} | ||||||
|  | 	return idna.Lookup.ToASCII(v) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func isASCII(s string) bool { | ||||||
|  | 	for i := 0; i < len(s); i++ { | ||||||
|  | 		if s[i] >= utf8.RuneSelf { | ||||||
|  | 			return false | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // matcher represents the matching rule for a given value in the NO_PROXY list
 | ||||||
|  | type matcher interface { | ||||||
|  | 	// match returns true if the host and optional port or ip and optional port
 | ||||||
|  | 	// are allowed
 | ||||||
|  | 	match(host, port string, ip net.IP) bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // allMatch matches on all possible inputs
 | ||||||
|  | type allMatch struct{} | ||||||
|  | 
 | ||||||
|  | func (a allMatch) match(host, port string, ip net.IP) bool { | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type cidrMatch struct { | ||||||
|  | 	cidr *net.IPNet | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m cidrMatch) match(host, port string, ip net.IP) bool { | ||||||
|  | 	return m.cidr.Contains(ip) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type ipMatch struct { | ||||||
|  | 	ip   net.IP | ||||||
|  | 	port string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m ipMatch) match(host, port string, ip net.IP) bool { | ||||||
|  | 	if m.ip.Equal(ip) { | ||||||
|  | 		return m.port == "" || m.port == port | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type domainMatch struct { | ||||||
|  | 	host string | ||||||
|  | 	port string | ||||||
|  | 
 | ||||||
|  | 	matchHost bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (m domainMatch) match(host, port string, ip net.IP) bool { | ||||||
|  | 	if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { | ||||||
|  | 		return m.port == "" || m.port == port | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||