fix highwayhash

commit 6b020a1f97 (parent 5a2cfeabba)

Makefile | 6
@@ -40,6 +40,12 @@ WARMER_PACKAGE = $(REPOPATH)/cmd/warmer
KANIKO_PROJECT = $(REPOPATH)/kaniko
BUILD_ARG ?=

# Force using Go Modules and always read the dependencies from
# the `vendor` folder.
export GO111MODULE = on
export GOFLAGS = -mod=vendor

out/executor: $(GO_FILES)
	GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -o $@ $(EXECUTOR_PACKAGE)

go.mod | 2
@@ -103,8 +103,6 @@ require (
	google.golang.org/appengine v1.1.0 // indirect
	google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 // indirect
	google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 // indirect
	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/ini.v1 v1.51.0 // indirect
	gopkg.in/src-d/go-billy.v4 v4.2.0 // indirect

go.sum | 7
@@ -137,6 +137,7 @@ github.com/karrick/godirwalk v1.7.7 h1:lLkPCA+C0u1pI4fLFseaupvh5/THlPJIqSPmnGGVi
github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -207,8 +208,6 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo1
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
@@ -271,13 +270,9 @@ google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 h1:2PjFmwzH/sxgW9C
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 h1:vTe0EiECbrLSj5+6SXMi+eWwYW/bjmd26LE5lv52YVs=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=

@@ -0,0 +1,6 @@
#!/bin/bash

today=$(date +%Y%m%d)

sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
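For context: `$GOFILE` is one of the environment variables `go generate` exports to the command it runs, so this new script (its path is suppressed in the diff) is presumably invoked from a `//go:generate` directive in the Go file that declares `const Repo`. A minimal sketch of such a file — the package, file, and script names are hypothetical; only the `const Repo = "YYYYMMDD"` shape is implied by the sed pattern above:

```go
package version

// Running `go generate` in this package sets GOFILE to this file's name,
// and the script above rewrites the date stamp below in place.
//go:generate ./stamp-date.sh

// Repo is an eight-digit YYYYMMDD stamp maintained by go generate.
const Repo = "20190101"
```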
File diff suppressed because one or more lines are too long
The following generated, vendored files changed mode (Executable file → Normal file, 0 lines changed):

vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
File diff suppressed because it is too large
@@ -0,0 +1,12 @@
# go-ansiterm

This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform dependent.

For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.

The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go) to validate that the expected events are being produced and called; the other is a Windows implementation (winterm/win_event_handler.go).

See parser_test.go for examples exercising the state machine and generating appropriate function calls.

-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
@@ -0,0 +1,292 @@
# Azure Active Directory authentication for Go

This is a standalone package for authenticating with Azure Active
Directory from other Go libraries and applications, in particular the [Azure SDK
for Go](https://github.com/Azure/azure-sdk-for-go).

Note: Despite the package's name it is not related to other "ADAL" libraries
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
trackers.

## Install

```bash
go get -u github.com/Azure/go-autorest/autorest/adal
```

## Usage

An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).

### Register an Azure AD Application with secret

1. Register a new application with a `secret` credential

   ```
   az ad app create \
       --display-name example-app \
       --homepage https://example-app/home \
       --identifier-uris https://example-app/app \
       --password secret
   ```

2. Create a service principal using the `Application ID` from the previous step

   ```
   az ad sp create --id "Application ID"
   ```

   * Replace `Application ID` with `appId` from step 1.

### Register an Azure AD Application with certificate

1. Create a private key

   ```
   openssl genrsa -out "example-app.key" 2048
   ```

2. Create the certificate

   ```
   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
   ```

3. Create the PKCS12 version of the certificate, which also contains the private key

   ```
   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
   ```

4. Register a new application with the certificate content from `example-app.crt`

   ```
   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"

   az ad app create \
       --display-name example-app \
       --homepage https://example-app/home \
       --identifier-uris https://example-app/app \
       --key-usage Verify --end-date 2018-01-01 \
       --key-value "${certificateContents}"
   ```

5. Create a service principal using the `Application ID` from the previous step

   ```
   az ad sp create --id "APPLICATION_ID"
   ```

   * Replace `APPLICATION_ID` with `appId` from step 4.

### Grant the necessary permissions

Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to a service principal of an Azure AD application depending on your needs.

```
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
```

* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
* Replace the `ROLE_NAME` with a role name of your choice.

It is also possible to define custom role definitions.

```
az role definition create --role-definition role-definition.json
```

* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.

### Acquire Access Token

The common configuration used by all flows:

```Go
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
tenantID := "TENANT_ID"
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)

applicationID := "APPLICATION_ID"

callback := func(token adal.Token) error {
	// This is called after the token is acquired
}

// The resource for which the token is acquired
resource := "https://management.core.windows.net/"
```

* Replace the `TENANT_ID` with your tenant ID.
* Replace the `APPLICATION_ID` with the value from the previous section.

#### Client Credentials

```Go
applicationSecret := "APPLICATION_SECRET"

spt, err := adal.NewServicePrincipalToken(
	oauthConfig,
	applicationID,
	applicationSecret,
	resource,
	callbacks...)
if err != nil {
	return nil, err
}

// Acquire a new access token
err = spt.Refresh()
if err == nil {
	token := spt.Token
}
```

* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.

#### Client Certificate

```Go
certificatePath := "./example-app.pfx"

certData, err := ioutil.ReadFile(certificatePath)
if err != nil {
	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}

// Get the certificate and private key from pfx file
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
if err != nil {
	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}

spt, err := adal.NewServicePrincipalTokenFromCertificate(
	oauthConfig,
	applicationID,
	certificate,
	rsaPrivateKey,
	resource,
	callbacks...)

// Acquire a new access token
err = spt.Refresh()
if err == nil {
	token := spt.Token
}
```

* Update the certificate path to point to the example-app.pfx file which was created in the previous section.

#### Device Code

```Go
oauthClient := &http.Client{}

// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
	oauthClient,
	oauthConfig,
	applicationID,
	resource)
if err != nil {
	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
}

// Display the authentication message
fmt.Println(*deviceCode.Message)

// Wait here until the user is authenticated
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
}

spt, err := adal.NewServicePrincipalTokenFromManualToken(
	oauthConfig,
	applicationID,
	resource,
	*token,
	callbacks...)

if err == nil {
	token := spt.Token
}
```

#### Username/password authentication

```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
	oauthConfig,
	applicationID,
	username,
	password,
	resource,
	callbacks...)

if err == nil {
	token := spt.Token
}
```

#### Authorization code authentication

```Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
	oauthConfig,
	applicationID,
	clientSecret,
	authorizationCode,
	redirectURI,
	resource,
	callbacks...)

err = spt.Refresh()
if err == nil {
	token := spt.Token
}
```

### Command Line Tool

A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.

```
adal -h

Usage of ./adal:
  -applicationId string
        application id
  -certificatePath string
        path to pk12/PFC application certificate
  -mode string
        authentication mode (device, secret, cert, refresh) (default "device")
  -resource string
        resource for which the token is requested
  -secret string
        application secret
  -tenantId string
        tenant id
  -tokenCachePath string
        location of oath token cache (default "/home/cgc/.adal/accessToken.json")
```

Example: acquire a token for `https://management.core.windows.net/` using the device code flow:

```
adal -mode device \
    -applicationId "APPLICATION_ID" \
    -tenantId "TENANT_ID" \
    -resource https://management.core.windows.net/
```
@@ -0,0 +1 @@
*.exe
@@ -0,0 +1,22 @@
# go-winio

This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
for using named pipes as a net transport.

This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.

Please see the LICENSE file for licensing information.

This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.

Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
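For readers unfamiliar with the package, the net-transport use mentioned in that README is exposed through `ListenPipe` and `DialPipe`, which return the standard `net.Listener` and `net.Conn` interfaces. A minimal, Windows-only sketch (the pipe name and message are illustrative):

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
)

func main() {
	// A named pipe behaves like any other net.Listener/net.Conn pair.
	l, err := winio.ListenPipe(`\\.\pipe\demo`, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	go func() {
		// Dial the same pipe from a client goroutine.
		c, err := winio.DialPipe(`\\.\pipe\demo`, nil)
		if err != nil {
			return
		}
		defer c.Close()
		fmt.Fprintln(c, "hello over a named pipe")
	}()

	conn, err := l.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	fmt.Printf("got: %s", buf[:n])
}
```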
@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,5 @@
Gotty is a library written in Go that determines and reads termcap database
files to produce an interface for interacting with the capabilities of a
terminal.
See the godoc documentation or the source code for more information about
function usage.
@@ -0,0 +1,3 @@
gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
all:// TODO add more documentation, with function usage in a doc.go file.
all:// TODO add more testing/benchmarking with go test.
@@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token

[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret

[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret
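This new file reads like a shared-credentials fixture for the AWS SDK for Go. For context, a sketch of how such a file is typically consumed with aws-sdk-go — the file path and chosen profile below are illustrative, not taken from this commit:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Load the [no_token] profile from a shared credentials file.
	creds := credentials.NewSharedCredentials("testdata/credentials.ini", "no_token")
	v, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(v.AccessKeyID) // prints "accessKey" for the fixture above
}
```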
File diff suppressed because it is too large
@@ -0,0 +1,4 @@
*.prof
*.test
*.swp
/bin/
@@ -0,0 +1,18 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

default: build

race:
	@go test -v -race -test.run="TestSimulate_(100op|1000op)"

# go get github.com/kisielk/errcheck
errcheck:
	@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt

test:
	@go test -v -cover .
	@go test -v ./cmd/bolt

.PHONY: fmt test
@@ -0,0 +1,916 @@
Bolt [Coverage Status](https://coveralls.io/r/boltdb/bolt?branch=master) [GoDoc](https://godoc.org/github.com/boltdb/bolt)
====

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.

Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/

## Project Status

Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)

## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.

### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.

To open your database, simply use the `bolt.Open()` function:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open the my.db data file in your current directory.
	// It will be created if it doesn't exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	...
}
```

Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```

### Transactions

Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.

Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

#### Read-write transactions

To start a read-write transaction, you can use the `DB.Update()` function:

```go
err := db.Update(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.

Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.

#### Read-only transactions

To start a read-only transaction, you can use the `DB.View()` function:

```go
err := db.View(func(tx *bolt.Tx) error {
	...
	return nil
})
```

You also get a consistent view of the database within this closure, however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.

#### Batch read-write transactions

Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.

The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.

For example: don't display messages from inside the function, instead
set variables in the enclosing scope:

```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
	// Find last key in bucket, decode as bigendian uint64, increment
	// by one, encode back to []byte, and add new key.
	...
	id = newValue
	return nil
})
if err != nil {
	return ...
}
fmt.Printf("Allocated ID %d\n", id)
```

#### Managing transactions manually

The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
	return err
}

// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
	return err
}
```

The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.

### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
function:

```go
db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucket([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.

To delete a bucket, simply call the `Tx.DeleteBucket()` function.
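Both of those calls are one-liners inside a read-write transaction; a minimal sketch combining them (the bucket name is illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	// Create the bucket on first use...
	if _, err := tx.CreateBucketIfNotExists([]byte("MyBucket")); err != nil {
		return err
	}
	// ...and later, drop it along with every key it contains.
	return tx.DeleteBucket([]byte("MyBucket"))
})
```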

### Using key/value pairs

To save a key/value pair to a bucket, use the `Bucket.Put()` function:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Put([]byte("answer"), []byte("42"))
	return err
})
```

This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	fmt.Printf("The answer is: %s\n", v)
	return nil
})
```

The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing.

Use the `Bucket.Delete()` function to delete a key from the bucket.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
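A short sketch tying those notes together: deleting a key, telling a missing key apart from a zero-length value, and copying a value that must outlive the transaction (the key names are illustrative):

```go
var saved []byte
err := db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))

	// Delete the key; a nil Get result then means it no longer exists.
	if err := b.Delete([]byte("answer")); err != nil {
		return err
	}
	if b.Get([]byte("answer")) == nil {
		fmt.Println("answer no longer exists")
	}

	// A zero-length value is not the same as a missing key.
	if err := b.Put([]byte("empty"), []byte{}); err != nil {
		return err
	}

	// Copy before the transaction closes if the value must outlive it.
	saved = append([]byte(nil), b.Get([]byte("empty"))...)
	return nil
})
if err != nil {
	log.Fatal(err)
}
```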

### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.

```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		// Retrieve the users bucket.
		// This should be created when the DB is first opened.
		b := tx.Bucket([]byte("users"))

		// Generate ID for the user.
		// This returns an error only if the Tx is closed or not writeable.
		// That can't happen in an Update() call so I ignore the error check.
		id, _ := b.NextSequence()
		u.ID = int(id)

		// Marshal user data into bytes.
		buf, err := json.Marshal(u)
		if err != nil {
			return err
		}

		// Persist bytes to users bucket.
		return b.Put(itob(u.ID), buf)
	})
}

// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

type User struct {
	ID int
	...
}
```

### Iterating over keys

Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.

The following functions are available on the cursor:

```
First()  Move to the first key.
Last()   Move to the last key.
Seek()   Move to a specific key.
Next()   Move to the next key.
Prev()   Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
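The same rules make reverse iteration a two-line change from the loop above; a small sketch (bucket name illustrative):

```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	// Seek to the end first, then walk backward.
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```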

#### Prefix scans

To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

#### Range scans

Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume our events bucket exists and has RFC3339 encoded time keys.
	c := tx.Bucket([]byte("Events")).Cursor()

	// Our time range spans the 90's decade.
	min := []byte("1990-01-01T00:00:00Z")
	max := []byte("2000-01-01T00:00:00Z")

	// Iterate over the 90's.
	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
		fmt.Printf("%s: %s\n", k, v)
	}

	return nil
})
```

Note that, while RFC3339 is sortable, the Go implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.

#### ForEach()

You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	b.ForEach(func(k, v []byte) error {
		fmt.Printf("key=%s, value=%s\n", k, v)
		return nil
	})
	return nil
})
```

Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.

### Nested buckets

You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:

```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```

Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.

```go

// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
	// Start the transaction.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Retrieve the root bucket for the account.
	// Assume this has already been created when the account was set up.
	root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))

	// Setup the users bucket.
	bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
	if err != nil {
		return err
	}

	// Generate an ID for the new user.
	userID, err := bkt.NextSequence()
	if err != nil {
		return err
	}
	u.ID = userID

	// Marshal and save the encoded user.
	if buf, err := json.Marshal(u); err != nil {
		return err
	} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
		return err
	}

	// Commit the transaction.
	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}

```

### Database backups

Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups:

```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w)
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```

Then you can backup using this command:

```sh
$ curl http://localhost/backup > my.db
```

Or you can open your browser to `http://localhost/backup` and it will download
automatically.

If you want to backup to another file you can use the `Tx.CopyFile()` helper
function.
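A one-line sketch of that helper inside a read-only transaction (the destination path and file mode are illustrative):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Writes a consistent copy of the database to my.db.bak.
	return tx.CopyFile("my.db.bak", 0600)
})
```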
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To this,
|
||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
||||
uses a shared lock to allow multiple processes to read from the database but
|
||||
it will block any processes from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Mobile Use (iOS/Android)
|
||||
|
||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
||||
contain your database logic and a reference to a `*bolt.DB` with a initializing
|
||||
constructor that takes in a filepath where the database file will be stored.
|
||||
Neither Android nor iOS require extra permissions or cleanup from using this method.
|
||||
|
||||
```go
|
||||
func NewBoltDB(filepath string) *BoltDB {
|
||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &BoltDB{db}
|
||||
}
|
||||
|
||||
type BoltDB struct {
|
||||
db *bolt.DB
|
||||
...
|
||||
}
|
||||
|
||||
func (b *BoltDB) Path() string {
|
||||
return b.db.Path()
|
||||
}
|
||||
|
||||
func (b *BoltDB) Close() {
|
||||
b.db.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Database logic should be defined as methods on this wrapper struct.
|
||||
|
||||
To initialize this struct from the native language (both platforms now sync
|
||||
their local storage to the cloud. These snippets disable that functionality for the
|
||||
database file):
|
||||
|
||||
#### Android
|
||||
|
||||
```java
|
||||
String path;
|
||||
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
|
||||
path = getNoBackupFilesDir().getAbsolutePath();
|
||||
} else{
|
||||
path = getFilesDir().getAbsolutePath();
|
||||
}
|
||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
|
||||
```
|
||||
|
||||
#### iOS
|
||||
|
||||
```objc
|
||||
- (void)demo {
|
||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
||||
NSUserDomainMask,
|
||||
YES) objectAtIndex:0];
|
||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
||||
//Some DB Logic would go here
|
||||
[demo close];
|
||||
}
|
||||
|
||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
||||
{
|
||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
||||
|
||||
NSError *error = nil;
|
||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
||||
if(!success){
|
||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases
|
||||
|
||||
### Postgres, MySQL, & other relational databases
|
||||
|
||||
Relational databases structure data into rows and are only accessible through
|
||||
the use of SQL. This approach provides flexibility in how you store and query
|
||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
||||
data by key but provides no built-in support for joining values together.
|
||||
|
||||
Most relational databases (with the exception of SQLite) are standalone servers
|
||||
that run separately from your application. This gives your systems
|
||||
flexibility to connect multiple application servers to a single database
|
||||
server but also adds overhead in serializing and transporting data over the
|
||||
network. Bolt runs as a library included in your application so all data access
|
||||
has to go through your application's process. This brings data closer to your
|
||||
application but limits multi-process access to the data.
|
||||
|
||||
|
||||
### LevelDB, RocksDB
|
||||
|
||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
||||
they are libraries bundled into the application, however, their underlying
|
||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
||||
have trade-offs.
|
||||
|
||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
||||
spinning disks then LevelDB could be a good choice. If your application is
|
||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
||||
|
||||
One other important consideration is that LevelDB does not have transactions.
|
||||
It supports batch writing of key/values pairs and it supports read snapshots
|
||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
||||
Bolt supports fully serializable ACID transactions.
|
||||
|
||||
|
||||
### LMDB
|
||||
|
||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
||||
lock-free MVCC using a single writer and multiple readers.
|
||||
|
||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
||||
opts to disallow actions which can leave the database in a corrupted state. The
|
||||
only exception to this in Bolt is `DB.NoSync`.
|
||||
|
||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
||||
automatically. LMDB overloads the getter and setter functions with multiple
|
||||
flags whereas Bolt splits these specialized cases into their own functions.
|
||||
|
||||
|
||||
## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read intensive workloads. Sequential write performance is
|
||||
also fast but random writes can be slow. You can use `DB.Batch()` or add a
|
||||
write-ahead log to help mitigate this issue.
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Bolt uses an exclusive write lock on the database file so it cannot be
|
||||
shared by multiple processes.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization.
|
||||
|
||||
* Use larger buckets in general. Smaller buckets causes poor page utilization
|
||||
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM, provided its
|
||||
memory-map fits in the process virtual address space. It may be problematic
|
||||
on 32-bits systems.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
|
||||
|
||||
|
||||
## Reading the Source
|
||||
|
||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
||||
transactional key/value database so it can be a good starting point for people
|
||||
interested in how databases work.
|
||||
|
||||
The best places to start are the main entry points into Bolt:
|
||||
|
||||
- `Open()` - Initializes the reference to the database. It's responsible for
|
||||
creating the database if it doesn't exist, obtaining an exclusive lock on the
|
||||
file, reading the meta pages, & memory-mapping the file.
|
||||
|
||||
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
|
||||
value of the `writable` argument. This requires briefly obtaining the "meta"
|
||||
lock to keep track of open transactions. Only one read-write transaction can
|
||||
exist at a time so the "rwlock" is acquired during the life of a read-write
|
||||
transaction.
|
||||
|
||||
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
|
||||
arguments, a cursor is used to traverse the B+tree to the page and position
|
||||
where they key & value will be written. Once the position is found, the bucket
|
||||
materializes the underlying page and the page's parent pages into memory as
|
||||
"nodes". These nodes are where mutations occur during read-write transactions.
|
||||
These changes get flushed to disk during commit.
|
||||
|
||||
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
|
||||
to move to the page & position of a key/value pair. During a read-only
|
||||
transaction, the key and value data is returned as a direct reference to the
|
||||
underlying mmap file so there's no allocation overhead. For read-write
|
||||
transactions, this data may reference the mmap file or one of the in-memory
|
||||
node values.
|
||||
|
||||
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
|
||||
or in-memory nodes. It can seek to a specific key, move to the first or last
|
||||
value, or it can move forward or backward. The cursor handles the movement up
|
||||
and down the B+tree transparently to the end user.
|
||||
|
||||
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
|
||||
into pages to be written to disk. Writing to disk then occurs in two phases.
|
||||
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
|
||||
new meta page with an incremented transaction ID is written and another
|
||||
`fsync()` occurs. This two phase write ensures that partially written data
|
||||
pages are ignored in the event of a crash since the meta page pointing to them
|
||||
is never written. Partially written meta pages are invalidated because they
|
||||
are written with a checksum.
|
||||
|
||||
If you have additional notes that could be helpful for others, please submit
|
||||
them via pull request.
|
||||
|
||||
|
||||
## Other Projects Using Bolt
|
||||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL database, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using Bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as an optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a Linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, exposes a JSON-over-HTTP API, supports ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang that uses BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB files in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB.
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.

If you are using Bolt in a project please send a pull request to add it to the list.

@ -0,0 +1,18 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\boltdb\bolt

environment:
  GOPATH: c:\gopath

install:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - go test -v ./...

@ -0,0 +1,37 @@
#!/bin/bash

set -e

mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl"

fix() {
	sed 's,^package syscall$,package sysx,' \
		| sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \
		| gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
		| gofmt -r='Syscall6 -> syscall.Syscall6' \
		| gofmt -r='Syscall -> syscall.Syscall' \
		| gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
		| gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
		| gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
		| gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
		| gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \
		| gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \
		| gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \
		| gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR'
}

if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then
	echo "Must specify \$GOARCH and \$GOOS"
	exit 1
fi

mkargs=""

if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then
	mkargs="-l32"
fi

for f in "$@"; do
	$mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go"
done

@ -0,0 +1,3 @@
language: go
go:
  - 1.7.x

@ -0,0 +1,27 @@
# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: fmt vet test deps

test: deps
	go test -v ./...

deps:
	go get -d -t ./...

fmt:
	gofmt -s -l .

vet:
	go vet ./...

@ -0,0 +1,32 @@
### fifo

[Build Status](https://travis-ci.org/containerd/fifo)

Go package for handling fifos in a sane way.

```
// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
// Context can be used to cancel this function before open(2) has returned.
// Accepted flags:
// - syscall.O_CREAT - create new fifo if one doesn't exist
// - syscall.O_RDONLY - open fifo only from reader side
// - syscall.O_WRONLY - open fifo only from writer side
// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
// fifo isn't open. read/write will be connected after the actual fifo is
// open or after fifo is closed.
func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)


// Read from a fifo to a byte array.
func (f *fifo) Read(b []byte) (int, error)


// Write from byte array to a fifo.
func (f *fifo) Write(b []byte) (int, error)


// Close the fifo. Next reads/writes will error. This method can also be used
// before open(2) has returned and fifo was never opened.
func (f *fifo) Close() error
```
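
A minimal usage sketch, assuming a Linux host and the `github.com/containerd/fifo` import path; the fifo path is illustrative. Opening with `O_RDWR` keeps this single-process demo from blocking while it waits for a peer:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"syscall"
	"time"

	"github.com/containerd/fifo"
)

func main() {
	// Give up if the fifo cannot be opened within five seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Create the fifo if needed and open it from both sides.
	f, err := fifo.OpenFifo(ctx, "/tmp/example.fifo", syscall.O_CREAT|syscall.O_RDWR, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 6)
	if _, err := f.Read(buf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read back: %s", buf)
}
```
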
@ -1 +0,0 @@
docs.md

@ -1 +0,0 @@
../

@ -1 +0,0 @@
../etcdctl

@ -1 +0,0 @@
../functional

@ -1 +0,0 @@
../tools

@ -0,0 +1,95 @@
syntax = "proto2";
package raftpb;

import "gogoproto/gogo.proto";

option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.goproto_enum_prefix_all) = false;

enum EntryType {
	EntryNormal     = 0;
	EntryConfChange = 1;
}

message Entry {
	optional uint64    Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
	optional uint64    Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
	optional EntryType Type  = 1 [(gogoproto.nullable) = false];
	optional bytes     Data  = 4;
}

message SnapshotMetadata {
	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
	optional uint64    index      = 2 [(gogoproto.nullable) = false];
	optional uint64    term       = 3 [(gogoproto.nullable) = false];
}

message Snapshot {
	optional bytes            data     = 1;
	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
}

enum MessageType {
	MsgHup            = 0;
	MsgBeat           = 1;
	MsgProp           = 2;
	MsgApp            = 3;
	MsgAppResp        = 4;
	MsgVote           = 5;
	MsgVoteResp       = 6;
	MsgSnap           = 7;
	MsgHeartbeat      = 8;
	MsgHeartbeatResp  = 9;
	MsgUnreachable    = 10;
	MsgSnapStatus     = 11;
	MsgCheckQuorum    = 12;
	MsgTransferLeader = 13;
	MsgTimeoutNow     = 14;
	MsgReadIndex      = 15;
	MsgReadIndexResp  = 16;
	MsgPreVote        = 17;
	MsgPreVoteResp    = 18;
}

message Message {
	optional MessageType type       = 1  [(gogoproto.nullable) = false];
	optional uint64      to         = 2  [(gogoproto.nullable) = false];
	optional uint64      from       = 3  [(gogoproto.nullable) = false];
	optional uint64      term       = 4  [(gogoproto.nullable) = false];
	optional uint64      logTerm    = 5  [(gogoproto.nullable) = false];
	optional uint64      index      = 6  [(gogoproto.nullable) = false];
	repeated Entry       entries    = 7  [(gogoproto.nullable) = false];
	optional uint64      commit     = 8  [(gogoproto.nullable) = false];
	optional Snapshot    snapshot   = 9  [(gogoproto.nullable) = false];
	optional bool        reject     = 10 [(gogoproto.nullable) = false];
	optional uint64      rejectHint = 11 [(gogoproto.nullable) = false];
	optional bytes       context    = 12;
}

message HardState {
	optional uint64 term   = 1 [(gogoproto.nullable) = false];
	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
	optional uint64 commit = 3 [(gogoproto.nullable) = false];
}

message ConfState {
	repeated uint64 nodes    = 1;
	repeated uint64 learners = 2;
}

enum ConfChangeType {
	ConfChangeAddNode        = 0;
	ConfChangeRemoveNode     = 1;
	ConfChangeUpdateNode     = 2;
	ConfChangeAddLearnerNode = 3;
}

message ConfChange {
	optional uint64         ID      = 1 [(gogoproto.nullable) = false];
	optional ConfChangeType Type    = 2 [(gogoproto.nullable) = false];
	optional uint64         NodeID  = 3 [(gogoproto.nullable) = false];
	optional bytes          Context = 4;
}

@ -0,0 +1,4 @@
.DS_Store
bin

@ -0,0 +1,13 @@
language: go

script:
  - go vet ./...
  - go test -v ./...

go:
  - 1.3
  - 1.4
  - 1.5
  - 1.6
  - 1.7
  - tip

@ -0,0 +1,97 @@
## Migration Guide from v2 -> v3

Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.

### `Token.Claims` is now an interface type

The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.

`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type-assert the claims property.

The old example for parsing a token looked like this:

```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
	fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```

and is now directly mapped to:

```go
if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
	claims := token.Claims.(jwt.MapClaims)
	fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```

`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type:

```go
type MyCustomClaims struct {
	User string
	*StandardClaims
}

if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
	claims := token.Claims.(*MyCustomClaims)
	fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
}
```

### `ParseFromRequest` has been moved

To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.

`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.

This simple parsing example:

```go
if token, err := jwt.ParseFromRequest(req, keyLookupFunc); err == nil {
	fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
}
```

is directly mapped to:

```go
if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
	claims := token.Claims.(jwt.MapClaims)
	fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```

There are several concrete `Extractor` types provided for your convenience (a composition sketch follows the list):

* `HeaderExtractor` will search a list of headers until one contains content.
* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument.
* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
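
As a sketch of how these compose (the custom header and argument names are illustrative, and `keyLookupFunc`, along with the `jwt` and `request` imports, are assumed to be defined as in the examples above):

```go
// Try the Authorization header first, then a custom header, then a
// query/form argument, in that order.
extractor := request.MultiExtractor{
	request.AuthorizationHeaderExtractor,
	request.HeaderExtractor{"X-Api-Token"},
	request.ArgumentExtractor{"access_token"},
}

token, err := request.ParseFromRequest(req, extractor, keyLookupFunc)
if err == nil && token.Valid {
	claims := token.Claims.(jwt.MapClaims)
	fmt.Printf("authenticated user: %v", claims["user"])
}
```
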
### RSA signing methods no longer accept `[]byte` keys

Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.

To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.

```go
func keyLookupFunc(token *jwt.Token) (interface{}, error) {
	// Don't forget to validate that the alg is what you expect:
	if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
		return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
	}

	// Look up the key
	key, err := lookupPublicKey(token.Header["kid"])
	if err != nil {
		return nil, err
	}

	// Unpack the key from PEM encoded PKCS8
	return jwt.ParseRSAPublicKeyFromPEM(key)
}
```

@ -0,0 +1,100 @@
# jwt-go

[Build Status](https://travis-ci.org/dgrijalva/jwt-go)
[GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go)

A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)

**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly and will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.

**SECURITY NOTICE:** Some older versions of Go have a security issue in crypto/elliptic. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.

**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types to match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.

## What the heck is a JWT?

JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.

In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.

The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.

The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
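
To make the three-part structure concrete, here is a minimal sketch that splits a token and decodes its header and claims with only the standard library. The token value is illustrative and the signature placeholder is not valid; no verification happens here:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"strings"
)

func main() {
	// header.claims.signature - the first two parts decode to JSON.
	token := "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9." +
		"eyJ1c2VyIjoiYm9iIiwiZXhwIjoxNTAwMDAwMDAwfQ." +
		"signature-goes-here"

	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		log.Fatal("not a JWT")
	}

	// JWTs use unpadded base64url (RFC 4648), hence RawURLEncoding.
	for i, name := range []string{"header", "claims"} {
		raw, err := base64.RawURLEncoding.DecodeString(parts[i])
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %s\n", name, raw)
	}
}
```
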
## What's in the box?

This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.

## Examples

See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:

* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)

## Extensions

This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.

Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go

## Compliance

This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:

* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.

## Project Status & Versioning

This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).

This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).

While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.

**BREAKING CHANGES:**
* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.

## Usage Tips

### Signing vs Encryption

A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:

* The author of the token was in possession of the signing secret
* The data has not been modified since it was signed

It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.

### Choosing a Signing Method

There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.

Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.

Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.

### Signing Methods and Key Types

Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones (a short signing/parsing sketch follows the list):

* The [HMAC signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
* The [RSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
* The [ECDSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
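
A minimal sketch of the HMAC case follows; the secret, claims, and token lifetime are illustrative, and a real application should source the secret from configuration:

```go
package main

import (
	"fmt"
	"log"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Illustrative secret; load from configuration in real code.
	secret := []byte("illustrative-secret")

	// Sign: HS256 expects a []byte key.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "bob",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		log.Fatal(err)
	}

	// Parse: check the signing method before returning the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("valid:", parsed.Valid, "user:", parsed.Claims.(jwt.MapClaims)["user"])
}
```
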
### JWT and OAuth

It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.

Without going too far down the rabbit hole, here's a description of the interaction of these technologies:

* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.

## More

Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).

The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.

@ -0,0 +1,118 @@
## `jwt-go` Version History

#### 3.2.0

* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.

#### 3.1.0

* Improvements to `jwt` command line tool
* Added `SkipClaimsValidation` option to `Parser`
* Documentation updates

#### 3.0.0

* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
  * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
  * `ParseFromRequest` has been moved to the `request` subpackage and usage has changed
  * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias for `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
* Other Additions and Changes
  * Added `Claims` interface type to allow users to decode the claims into a custom type
  * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
  * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
  * Added `ParseFromRequestWithClaims`, which is the `FromRequest` equivalent of `ParseWithClaims`
  * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
  * Added several new, more specific, validation errors to the error type bitmask
  * Moved examples from README to executable example files
  * Signing method registry is now thread safe
  * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)

#### 2.7.0

This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.

* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
* Error text for expired tokens includes how long it's been expired
* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
* Documentation updates

#### 2.6.0

* Exposed inner error within ValidationError
* Fixed validation errors when using UseJSONNumber flag
* Added several unit tests

#### 2.5.0

* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
* Updated/fixed some documentation
* Added a more helpful error message when trying to parse tokens that begin with `BEARER `

#### 2.4.0

* Added new type, Parser, to allow for configuration of various parsing parameters
  * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
  * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
* Fixed some bugs with ECDSA parsing

#### 2.3.0

* Added support for ECDSA signing methods
* Added support for RSA PSS signing methods (requires go v1.4)

#### 2.2.0

* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.

#### 2.1.0

Backwards compatible API change that was missed in 2.0.0.

* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`

#### 2.0.0

There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.

The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.

It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.

* **Compatibility Breaking Changes**
  * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
  * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
  * `KeyFunc` now returns `interface{}` instead of `[]byte`
  * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
  * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
  * Added public package global `SigningMethodHS256`
  * Added public package global `SigningMethodHS384`
  * Added public package global `SigningMethodHS512`
* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
  * Added public package global `SigningMethodRS256`
  * Added public package global `SigningMethodRS384`
  * Added public package global `SigningMethodRS512`
* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
* Refactored the RSA implementation to be easier to read
* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`

#### 1.0.2

* Fixed bug in parsing public keys from certificates
* Added more tests around the parsing of keys for RS256
* Code refactoring in RS256 implementation. No functional changes

#### 1.0.1

* Fixed panic if RS256 signing method was passed an invalid key

#### 1.0.0

* First versioned release
* API stabilized
* Supports creating, signing, parsing, and validating JWT tokens
* Supports RS256 and HS256 signing methods

@ -0,0 +1,38 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

# never check in from the bin directory (for now)
bin/*

# Test key files
*.pem

# Cover profiles
*.out

# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace
.idea/*

@ -0,0 +1,18 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>

@ -0,0 +1,117 @@
# Building the registry source

## Use-case

This is useful if you intend to actively work on the registry.

### Alternatives

Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).

People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.

OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).

### Gotchas

You are expected to know your way around with go & git.

If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.

## Build the development environment

The first prerequisite of properly building distribution targets is to have a Go
development environment set up. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.

If a Go development environment is set up, one can use `go get` to install the
`registry` command from the current latest:

    go get github.com/docker/distribution/cmd/registry

The above will install the source repository into the `GOPATH`.

Now create the directory for the registry data (this might require you to set permissions properly)

    mkdir -p /var/lib/registry

... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.

The `registry` binary can then be run with the following:

    $ $GOPATH/bin/registry --version
    $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown

> __NOTE:__ While you do not need to use `go get` to checkout the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.

The registry can be run with the default config using the following
incantation:

    $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
    INFO[0000] endpoint local-5003 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] endpoint local-8083 disabled, skipping  app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] listening on :5000                      app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
    INFO[0000] debug server listening localhost:5001

If it is working, one should see the above log messages.

### Repeatable Builds

For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).

A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:

    go get github.com/golang/lint/golint

Once these commands are available in the `GOPATH`, run `make` to get a full
build:

    $ make
    + clean
    + fmt
    + vet
    + lint
    + build
    github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
    github.com/sirupsen/logrus
    github.com/docker/libtrust
    ...
    github.com/yvasiyarov/gorelic
    github.com/docker/distribution/registry/handlers
    github.com/docker/distribution/cmd/registry
    + test
    ...
    ok    github.com/docker/distribution/digest          7.875s
    ok    github.com/docker/distribution/manifest        0.028s
    ok    github.com/docker/distribution/notifications   17.322s
    ?     github.com/docker/distribution/registry        [no test files]
    ok    github.com/docker/distribution/registry/api/v2 0.101s
    ?     github.com/docker/distribution/registry/auth   [no test files]
    ok    github.com/docker/distribution/registry/auth/silly 0.011s
    ...
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry
    + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
    + binaries

The above provides a repeatable build using the contents of the vendor
directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:

    $ ./bin/registry --version
    ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m

### Optional build tags

Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.
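For example, a sketch of setting tags on the command line (the tag values here are the ones the project's own Dockerfile uses; the target name assumes the `binaries` target from the Makefile above):

    $ DOCKER_BUILDTAGS='include_oss include_gcs' make binaries
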
@ -0,0 +1,108 @@
# Changelog

## 2.6.0 (2017-01-18)

#### Storage
- S3: fixed bug in delete due to read-after-write inconsistency
- S3: allow EC2 IAM roles to be used when authorizing region endpoints
- S3: add Object ACL Support
- S3: fix delete method's notion of subpaths
- S3: use multipart upload API in `Move` method for performance
- S3: add v2 signature signing for legacy S3 clones
- Swift: add simple heuristic to detect incomplete DLOs during read ops
- Swift: support different user and tenant domains
- Swift: bulk deletes in chunks
- Aliyun OSS: fix delete method's notion of subpaths
- Aliyun OSS: optimize data copy after upload finishes
- Azure: close leaking response body
- Fix storage drivers dropping non-EOF errors when listing repositories
- Compare path properly when listing repositories in catalog
- Add a foreign layer URL host whitelist
- Improve catalog enumerate runtime

#### Registry
- Export `storage.CreateOptions` in top-level package
- Enable notifications to endpoints that use self-signed certificates
- Properly validate multi-URL foreign layers
- Add control over validation of URLs in pushed manifests
- Proxy mode: fix socket leak when pull is cancelled
- Tag service: properly handle error responses on HEAD request
- Support for custom authentication URL in proxying registry
- Add configuration option to disable access logging
- Add notification filtering by target media type
- Manifest: `References()` returns all children
- Honor `X-Forwarded-Port` and Forwarded headers
- Reference: Preserve tag and digest in With* functions
- Add policy configuration for enforcing repository classes

#### Client
- Changes the client Tags `All()` method to follow links
- Allow registry clients to connect via HTTP2
- Better handling of OAuth errors in client

#### Spec
- Manifest: clarify relationship between urls and foreign layers
- Authorization: add support for repository classes

#### Manifest
- Override media type returned from `Stat()` for existing manifests
- Add plugin mediatype to distribution manifest

#### Docs
- Document `TOOMANYREQUESTS` error code
- Document required Let's Encrypt port
- Improve documentation around implementation of OAuth2
- Improve documentation for configuration

#### Auth
- Add support for registry type in scope
- Add support for using v2 ping challenges for v1
- Add leeway to JWT `nbf` and `exp` checking
- htpasswd: dynamically parse htpasswd file
- Fix missing auth headers with PATCH HTTP request when pushing to default port

#### Dockerfile
- Update to go1.7
- Reorder Dockerfile steps for better layer caching

#### Notes

Documentation has moved to the documentation repository at
`github.com/docker/docker.github.io/tree/master/registry`

The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` checks.


## 2.5.0 (2016-06-14)

#### Storage
- Ensure uploads directory is cleaned after upload is committed
- Add ability to cap concurrent operations in filesystem driver
- S3: Add 'us-gov-west-1' to the valid region list
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
- Add redirect middleware

#### Registry
- Add support for blobAccessController middleware
- Add support for layers from foreign sources
- Remove signature store
- Add support for Let's Encrypt
- Correct yaml key names in configuration

#### Client
- Add option to get content digest from manifest get

#### Spec
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
- Clarify API documentation around catalog fetch behavior

#### API
- Support returning HTTP 429 (Too Many Requests)

#### Documentation
- Update auth documentation examples to show "expires in" as int

#### Docker Image
- Use Alpine Linux as base image

@ -0,0 +1,148 @@
# Contributing to the registry

## Before reporting an issue...

### If your problem is with...

- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue

Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)

### If you...

- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is

Then please do not open an issue here yet - you should first try one of the following support forums:

- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution

### Reporting security issues

The Docker maintainers take security seriously. If you discover a security
issue, please bring it to their attention right away!

Please **DO NOT** file a public issue, instead send your report privately to
[security@docker.com](mailto:security@docker.com).

## Reporting an issue properly

By following these simple rules you will get better and faster feedback on your issue.

- search the bugtracker for an already reported issue

### If you found an issue that describes your problem:

- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.

### If you have not found an existing issue that describes your problem:

1. create a new issue, with a succinct title that describes your issue:
   - bad title: "It doesn't work with my docker"
   - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
   - `docker version`
   - `docker info`
   - `docker exec <registry-container> registry --version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry

## Contributing a patch for a known bug, or a small correction

You should follow the basic GitHub workflow:

1. fork
2. commit a change
3. make sure the tests pass
4. PR

Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:

- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`

Some simple rules to ensure quick merge:

- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits

## Contributing new features

You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.

If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.

Then you should submit your implementation, clearly linking to the issue (and possible proposal).

Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.

It's mandatory to:

- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code

Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.

Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)

## Coding Style

Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.

It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.

The rules:

1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
   [`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
   expectations, caveats and anything else that may be important. If a type
   gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
   In practice, short methods will have short variable names and globals will
   have longer names.
7. No underscores in package names. If you need a compound name, step back,
   and re-examine why you need a compound name. If you still think you need a
   compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
   warrant its own package, it has not been written generally enough to be a
   part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
   required. No, we don't need another unit testing framework. Assertion
   packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
    guidelines. Since you've read all the rules, you now know that.

If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.

@ -0,0 +1,21 @@
FROM golang:1.10-alpine

ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs

ARG GOOS=linux
ARG GOARCH=amd64

RUN set -ex \
    && apk add --no-cache make git

WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml

RUN make PREFIX=/go clean binaries

VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]

@ -0,0 +1,58 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
	[Org."Core maintainers"]
		people = [
			"aaronlehmann",
			"dmcgowan",
			"dmp42",
			"richardscothern",
			"shykes",
			"stevvooe",
		]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.aaronlehmann]
	Name = "Aaron Lehmann"
	Email = "aaron.lehmann@docker.com"
	GitHub = "aaronlehmann"

	[people.dmcgowan]
	Name = "Derek McGowan"
	Email = "derek@mcgstyle.net"
	GitHub = "dmcgowan"

	[people.dmp42]
	Name = "Olivier Gambier"
	Email = "olivier@docker.com"
	GitHub = "dmp42"

	[people.richardscothern]
	Name = "Richard Scothern"
	Email = "richard.scothern@gmail.com"
	GitHub = "richardscothern"

	[people.shykes]
	Name = "Solomon Hykes"
	Email = "solomon@docker.com"
	GitHub = "shykes"

	[people.stevvooe]
	Name = "Stephen Day"
	Email = "stephen.day@docker.com"
	GitHub = "stevvooe"
@ -0,0 +1,99 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)


# Used to populate version variable in main package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)

# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
	GO_GCFLAGS=-gcflags "-N -l"
	VERSION:="$(VERSION)-noopt"
endif

GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"

.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet
.DEFAULT: all
all: fmt vet lint build test binaries

AUTHORS: .mailmap .git/HEAD
	git log --format='%aN <%aE>' | sort -fu > $@

# This only needs to be generated by hand when cutting full releases.
version/version.go:
	./version/version.sh > $@

# Required for go 1.5 to build
GO15VENDOREXPERIMENT := 1

# Go files
GOFILES=$(shell find . -type f -name '*.go')

# Package list
PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)

# Resolving binary dependencies for specific targets
GOLINT=$(shell which golint || echo '')
VNDR=$(shell which vndr || echo '')

${PREFIX}/bin/registry: $(GOFILES)
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry

${PREFIX}/bin/digest: $(GOFILES)
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest

${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES)
	@echo "+ $@"
	@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template

docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
	./bin/registry-api-descriptor-template $< > $@

vet:
	@echo "+ $@"
	@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)

fmt:
	@echo "+ $@"
	@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
		(echo >&2 "+ please format Go code with 'gofmt -s'" && false)

lint:
	@echo "+ $@"
	$(if $(GOLINT), , \
		$(error Please install golint: `go get -u github.com/golang/lint/golint`))
	@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"

build:
	@echo "+ $@"
	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)

test:
	@echo "+ $@"
	@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)

test-full:
	@echo "+ $@"
	@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)

binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
	@echo "+ $@"

clean:
	@echo "+ $@"
	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"

dep-validate:
	@echo "+ $@"
	$(if $(VNDR), , \
		$(error Please install vndr: go get github.com/lk4d4/vndr))
	@rm -Rf .vendor.bak
	@mv vendor .vendor.bak
	@$(VNDR)
	@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
		(echo >&2 "+ inconsistent dependencies! what you have in vendor.conf does not match with what you have in vendor" && false)
	@rm -Rf vendor
	@mv .vendor.bak vendor
@ -0,0 +1,130 @@
# Distribution

The Docker toolset to pack, ship, store, and deliver content.

This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused on security and performance.

<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>

[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)

This repository contains the following components:

|**Component**       |Description                                                                                                                                                                                           |
|--------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                   |
| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                        |
| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry.                              |

### How does this integrate with Docker engine?

This project should provide an implementation of a V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.

### What are the long term goals of the Distribution project?

The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.

Our goal is to design a professional grade and extensible content distribution
system that allows users to:

* Enjoy an efficient, secure and reliable way to store, manage, package and
  exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solutions through good specs and solid
  extension mechanisms.

## More about Registry 2.0

The new registry implementation provides the following benefits:

- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications

For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).

### Who needs to deploy a registry?

By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.

For some users and even companies, this default behavior is sufficient. For
others, it is not.

For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or in continuous integration. For these
use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
may be the better choice.

### Migration to Registry 2.0

For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see [docker/migrator](https://github.com/docker/migrator).

## Contribute

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](BUILDING.md).

## Support

If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:

<table>
<tr>
	<th align="left">
	IRC
	</th>
	<td>
	#docker-distribution on FreeNode
	</td>
</tr>
<tr>
	<th align="left">
	Issue Tracker
	</th>
	<td>
	github.com/docker/distribution/issues
	</td>
</tr>
<tr>
	<th align="left">
	Google Groups
	</th>
	<td>
	https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
	</td>
</tr>
<tr>
	<th align="left">
	Mailing List
	</th>
	<td>
	docker@dockerproject.org
	</td>
</tr>
</table>


## License

This project is distributed under [Apache License, Version 2.0](LICENSE).
@ -0,0 +1,44 @@
## Registry Release Checklist

10. Compile release notes detailing features and fixes since the last release.

    Update the `CHANGELOG.md` file and create a PR to master with the updates.
    Once that PR has been approved by maintainers the change may be cherry-picked
    to the release branch (new release branches may be forked from this commit).

20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`

30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.

    ```
    make AUTHORS
    ```

40. Create a signed tag.

    Distribution uses semantic versioning. Tags are of the format
    `vx.y.z[-rcn]`. You will need PGP installed and a PGP key which has been added
    to your GitHub account. The comment for the tag should include the release
    notes; use previous tags as a guide for formatting them consistently. Run
    `git tag -s vx.y.z[-rcn]` to create the tag and `git tag -v vx.y.z[-rcn]` to verify it,
    checking the comment and the correct commit hash.

50. Push the signed tag.

60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.

70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.

80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to the new patch release if necessary.
    e.g. to release `2.3.1`

    `2.3.1 (new)`

    `2.3.0 -> 2.3.0` can be removed

    `2 -> 2.3.1`

    `2.3 -> 2.3.1`

90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
@ -0,0 +1,267 @@
# Roadmap

The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the release
relationship to the Docker Platform.

* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.

This road map is a living document, providing an overview of the goals and
considerations made in respect of the future of the project.

## Distribution Goals

- Replace the existing [docker registry](https://github.com/docker/docker-registry)
  implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
  distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models

## Distribution Components

Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.

* [Registry](#registry)
* [Distribution Package](#distribution-package)

***

### Registry

The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.

Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.

#### Data Storage and Distribution First

The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.

This means we should be selective in new features and API additions, including
those that may require expensive, ever-growing indexes. Requests should be
servable in "constant time".

#### Content Addressability

All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.
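
To make verifiability concrete, here is a minimal sketch using the `go-digest` package pinned in `vendor.conf` (the blob bytes are invented for the example):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte("example layer contents")

	// The identifier is derived from the bytes themselves.
	dgst := digest.FromBytes(blob)
	fmt.Println(dgst) // e.g. sha256:...

	// Any holder of the blob can recompute the digest and verify it.
	fmt.Println(dgst == digest.FromBytes(blob)) // true
}
```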

#### Content Agnostic

In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.

We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses and that model can be used to work with content.

#### Simplicity

The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.

This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service?

#### Extensibility

The registry should provide extension points to add functionality, keeping
the scope narrow while still making it possible to add features.

Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.

#### Active Feature Discussions

The following are feature discussions that are currently active.

If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigid design process before
landing in the registry.

##### Proxying to other Registries

A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.

##### Metadata storage

Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes at the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID-compliant
storage systems to handle metadata.
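
One hypothetical shape for such an abstraction, sketched here with invented names rather than an existing API in this tree:

```go
package metadata

import (
	"context"

	"github.com/opencontainers/go-digest"
)

// TagStore is one possible slice of the mutable metadata surface: tag reads
// and writes go through an interface, so an ACID-compliant backend can be
// swapped in behind it without touching blob storage.
type TagStore interface {
	Get(ctx context.Context, repo, tag string) (digest.Digest, error)
	Set(ctx context.Context, repo, tag string, dgst digest.Digest) error
	Delete(ctx context.Context, repo, tag string) error
}
```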

##### Peer to Peer transfer

Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit

##### Indexing, Search and Discovery

The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. This makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.

There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.
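
For instance, an external indexer could start from the catalog endpoint (`GET /v2/_catalog`). A minimal sketch, assuming a registry listening on `localhost:5000`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// The catalog endpoint lists repositories known to the registry.
	// Pagination (the `n` and `last` query parameters) is omitted here.
	resp, err := http.Get("http://localhost:5000/v2/_catalog")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var catalog struct {
		Repositories []string `json:"repositories"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&catalog); err != nil {
		log.Fatal(err)
	}
	for _, repo := range catalog.Repositories {
		fmt.Println(repo)
	}
}
```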

The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing tools
and either propose it as a standard search API or use it to inform a standardization
process. Once this has been explored, we will integrate with the docker client.

Please see the following for more detail:

- https://github.com/docker/distribution/issues/206

##### Deletes

> __NOTE:__ Deletes are a much asked for feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.

While, at first glance, implementing deleting seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological in
the context of a registry. The following paragraphs discuss the background and
approaches that could be applied to arrive at a solution.

The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal, but ideally both are supported. Generally, according to this rule, we
err on holding data longer than needed, ensuring that it is only removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.

To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in
reverse-dependent order, but makes wider transactional operations unsafe.

Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we can also try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.

Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.

A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.

Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:

- _Reference Counting_ - Maintain a count of references to each blob. This is
  challenging for a number of reasons: 1. maintaining a consistent consensus
  of reference counts across a set of Registries and 2. building the initial
  list of reference counts for an existing registry. These challenges can be
  met with a consensus protocol like Paxos or Raft in the first case and a
  necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
  and find all blob references. Delete all unreferenced blobs. This approach
  is very simple but requires disabling writes for a period of time while the
  service reads all data. This is slow and expensive but very accurate and
  effective (see the sketch after this list).
- _Generational GC_ - Do something similar to above but instead of blocking
  writes, writes are sent to another storage backend while reads are broadcast
  to the new and old backends. GC is then performed on the read-only portion.
  Because writes land in the new backend, the data in the read-only section
  can be safely deleted. The main drawbacks of this approach are complexity
  and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
  know exactly which data is referenced at any given time. This avoids the
  coordination problem by managing this data in a single location. We trade
  off metadata scalability for simplicity and performance. This is a very good
  option for most registry deployments. This would create a bottleneck for
  registry metadata. However, metadata is generally not the main bottleneck
  when serving images.
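
As a rough sketch of the mark phase behind the "Lock the World GC" option (the types are illustrative, and the function is only correct while writes are halted):

```go
package gc

import "github.com/opencontainers/go-digest"

// sweep returns the blobs that no manifest references. It is only correct
// while writes are halted: a concurrent upload could re-reference a blob
// after it has been scanned, which is exactly the race described above.
func sweep(blobs map[digest.Digest]struct{}, manifests map[digest.Digest][]digest.Digest) []digest.Digest {
	marked := make(map[digest.Digest]bool)
	for _, refs := range manifests {
		for _, d := range refs {
			marked[d] = true
		}
	}
	var garbage []digest.Digest
	for d := range blobs {
		if !marked[d] {
			garbage = append(garbage, d)
		}
	}
	return garbage
}
```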

Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple, but the amount of work in
coordination may offset the extra work it would take to build a _Centralized
Oracle_. We'll accept proposals for any solution but please coordinate with us
before dropping code.

At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.

Please see the following issues for more detail:

- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462

### Distribution Package

At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.

The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.

For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.

***

### Project Planning

An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
@ -0,0 +1,94 @@
# Pony-up!
machine:
  pre:
    # Install gvm
    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
    # Install codecov for coverage
    - pip install --user codecov

  post:
    # go
    - gvm install go1.10 --prefer-binary --name=stable

  environment:
    # Convenient shortcuts to "common" locations
    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
    BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
    # Trick circle brainflat "no absolute path" behavior
    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
    DOCKER_BUILDTAGS: "include_oss include_gcs"
    # Workaround Circle parsing dumb bugs and/or YAML wonkyness
    CIRCLE_PAIN: "mode: set"

  hosts:
    # Not used yet
    fancy: 127.0.0.1

dependencies:
  pre:
    # Copy the code to the gopath of all go versions
    - >
      gvm use stable &&
      mkdir -p "$(dirname $BASE_STABLE)" &&
      cp -R "$CHECKOUT" "$BASE_STABLE"

  override:
    # Install dependencies for every copied clone/go version
    - gvm use stable && go get github.com/lk4d4/vndr:
        pwd: $BASE_STABLE

  post:
    # For the stable go version, additionally install linting tools
    - >
      gvm use stable &&
      go get github.com/axw/gocov/gocov github.com/golang/lint/golint

test:
  pre:
    # Output the go versions we are going to test
    # - gvm use old && go version
    - gvm use stable && go version

    # Ensure validation of dependencies
    - git fetch origin:
        pwd: $BASE_STABLE
    - gvm use stable && if test -n "`git diff --stat=1000 origin/master | grep -E \"^[[:space:]]*vendor\"`"; then make dep-validate; fi:
        pwd: $BASE_STABLE

    # First thing: build everything. This will catch compile errors, and it's
    # also necessary for go vet to work properly (see #807).
    - gvm use stable && go install $(go list ./... | grep -v "/vendor/"):
        pwd: $BASE_STABLE

    # FMT
    - gvm use stable && make fmt:
        pwd: $BASE_STABLE

    # VET
    - gvm use stable && make vet:
        pwd: $BASE_STABLE

    # LINT
    - gvm use stable && make lint:
        pwd: $BASE_STABLE

  override:
    # Test stable, and report
    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
        timeout: 1000
        pwd: $BASE_STABLE

    # Test stable with race
    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
        timeout: 1000
        pwd: $BASE_STABLE
  post:
    # Report to codecov
    - bash <(curl -s https://codecov.io/bash):
        pwd: $BASE_STABLE

## Notes
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck
@ -0,0 +1,7 @@
#!/usr/bin/env bash
# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`: this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
@ -0,0 +1,52 @@
github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6
golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b
golang.org/x/net 4876518f9e71663000c348837735820161a42df7
golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54
google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19
google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
@ -0,0 +1,42 @@
# Working on the Engine API

The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.

It consists of various components in this repository:

- `api/swagger.yaml` A Swagger definition of the API.
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
- `cli/` The command-line client.
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
- `daemon/` The daemon, which serves the API.

## Swagger definition

The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:

1. Automatically generate documentation.
2. Automatically generate the Go server and client. (A work-in-progress.)
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.

## Updating the API documentation

The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.

The file is split into two main sections:

- `definitions`, which defines re-usable objects used in requests and responses
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)

To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.

There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).

`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.

## Viewing the API documentation

When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.

Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.

The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
@ -0,0 +1,12 @@

layout:
  models:
    - name: definition
      source: asset:model
      target: "{{ joinFilePath .Target .ModelPackage }}"
      file_name: "{{ (snakize (pascalize .Name)) }}.go"
  operations:
    - name: handler
      source: asset:serverOperation
      target: "{{ joinFilePath .Target .APIPackage .Package }}"
      file_name: "{{ (snakize (pascalize .Name)) }}.go"
File diff suppressed because it is too large
8 vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.proto generated vendored Normal file
@ -0,0 +1,8 @@
syntax = "proto3";

message LogEntry {
	string source = 1;
	int64 time_nano = 2;
	bytes line = 3;
	bool partial = 4;
}
20 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto generated vendored Normal file
@ -0,0 +1,20 @@
syntax = "proto3";

option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";

// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
message PluginSpec {
	string name = 1;
	string remote = 2;
	repeated PluginPrivilege privileges = 3;
	bool disabled = 4;
}

// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
message PluginPrivilege {
	string name = 1;
	string description = 2;
	repeated string value = 3;
}
@ -0,0 +1,14 @@
# Legacy API type versions

This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.

Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.

## Package name conventions

The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:

1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.

For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
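
A hypothetical sketch of the resulting layout (the package and type names are invented to illustrate the convention, not taken from the tree):

```go
// Package v1p21 would freeze the API types as of version 1.21; API 1.22
// changes land in api/types (the stable location), while this copy keeps
// 1.21 responses decodable.
package v1p21

// ContainerConfig is the shape of the container configuration as of API 1.21.
type ContainerConfig struct {
	Image  string
	Labels map[string]string
}
```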
232 vendor/github.com/docker/docker/builder/dockerfile/shell/envVarTest generated vendored Normal file
@ -0,0 +1,232 @@
A|hello | hello
A|he'll'o | hello
A|he'llo | error
A|he\'llo | he'llo
A|he\\'llo | error
A|abc\tdef | abctdef
A|"abc\tdef" | abc\tdef
A|"abc\\tdef" | abc\tdef
A|'abc\tdef' | abc\tdef
A|hello\ | hello
A|hello\\ | hello\
A|"hello | error
A|"hello\" | error
A|"hel'lo" | hel'lo
A|'hello | error
A|'hello\' | hello\
A|'hello\there' | hello\there
A|'hello\\there' | hello\\there
A|"''" | ''
A|$. | $.
A|he$1x | hex
A|he$.x | he$.x
# Next one is different on Windows as $pwd==$PWD
U|he$pwd. | he.
W|he$pwd. | he/home.
A|he$PWD | he/home
A|he\$PWD | he$PWD
A|he\\$PWD | he\/home
A|"he\$PWD" | he$PWD
A|"he\\$PWD" | he\/home
A|\${} | ${}
A|\${}aaa | ${}aaa
A|he\${} | he${}
A|he\${}xx | he${}xx
A|${} | error
A|${}aaa | error
A|he${} | error
A|he${}xx | error
A|he${hi} | he
A|he${hi}xx | hexx
A|he${PWD} | he/home
A|he${.} | error
A|he${XXX:-000}xx | he000xx
A|he${PWD:-000}xx | he/homexx
A|he${XXX:-$PWD}xx | he/homexx
A|he${XXX:-${PWD:-yyy}}xx | he/homexx
A|he${XXX:-${YYY:-yyy}}xx | heyyyxx
A|he${XXX:YYY} | error
A|he${XXX:+${PWD}}xx | hexx
A|he${PWD:+${XXX}}xx | hexx
A|he${PWD:+${SHELL}}xx | hebashxx
A|he${XXX:+000}xx | hexx
A|he${PWD:+000}xx | he000xx
A|'he${XX}' | he${XX}
A|"he${PWD}" | he/home
A|"he'$PWD'" | he'/home'
A|"$PWD" | /home
A|'$PWD' | $PWD
A|'\$PWD' | \$PWD
A|'"hello"' | "hello"
A|he\$PWD | he$PWD
A|"he\$PWD" | he$PWD
A|'he\$PWD' | he\$PWD
A|he${PWD | error
A|he${PWD:=000}xx | error
A|he${PWD:+${PWD}:}xx | he/home:xx
A|he${XXX:-\$PWD:}xx | he$PWD:xx
A|he${XXX:-\${PWD}z}xx | he${PWDz}xx
A|안녕하세요 | 안녕하세요
A|안'녕'하세요 | 안녕하세요
A|안'녕하세요 | error
A|안녕\'하세요 | 안녕'하세요
A|안\\'녕하세요 | error
A|안녕\t하세요 | 안녕t하세요
A|"안녕\t하세요" | 안녕\t하세요
A|'안녕\t하세요 | error
A|안녕하세요\ | 안녕하세요
A|안녕하세요\\ | 안녕하세요\
A|"안녕하세요 | error
A|"안녕하세요\" | error
A|"안녕'하세요" | 안녕'하세요
A|'안녕하세요 | error
A|'안녕하세요\' | 안녕하세요\
A|안녕$1x | 안녕x
A|안녕$.x | 안녕$.x
# Next one is different on Windows as $pwd==$PWD
U|안녕$pwd. | 안녕.
W|안녕$pwd. | 안녕/home.
A|안녕$PWD | 안녕/home
A|안녕\$PWD | 안녕$PWD
A|안녕\\$PWD | 안녕\/home
A|안녕\${} | 안녕${}
A|안녕\${}xx | 안녕${}xx
A|안녕${} | error
A|안녕${}xx | error
A|안녕${hi} | 안녕
A|안녕${hi}xx | 안녕xx
A|안녕${PWD} | 안녕/home
A|안녕${.} | error
A|안녕${XXX:-000}xx | 안녕000xx
A|안녕${PWD:-000}xx | 안녕/homexx
A|안녕${XXX:-$PWD}xx | 안녕/homexx
A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx
A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx
A|안녕${XXX:YYY} | error
A|안녕${XXX:+${PWD}}xx | 안녕xx
A|안녕${PWD:+${XXX}}xx | 안녕xx
A|안녕${PWD:+${SHELL}}xx | 안녕bashxx
A|안녕${XXX:+000}xx | 안녕xx
A|안녕${PWD:+000}xx | 안녕000xx
A|'안녕${XX}' | 안녕${XX}
A|"안녕${PWD}" | 안녕/home
A|"안녕'$PWD'" | 안녕'/home'
A|'"안녕"' | "안녕"
A|안녕\$PWD | 안녕$PWD
A|"안녕\$PWD" | 안녕$PWD
A|'안녕\$PWD' | 안녕\$PWD
A|안녕${PWD | error
A|안녕${PWD:=000}xx | error
A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx
A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx
A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx
A|$KOREAN | 한국어
A|안녕$KOREAN | 안녕한국어
A|${{aaa} | error
A|${aaa}} | }
A|${aaa | error
A|${{aaa:-bbb} | error
A|${aaa:-bbb}} | bbb}
A|${aaa:-bbb | error
A|${aaa:-bbb} | bbb
A|${aaa:-${bbb:-ccc}} | ccc
A|${aaa:-bbb ${foo} | error
A|${aaa:-bbb {foo} | bbb {foo
A|${:} | error
A|${:-bbb} | error
A|${:+bbb} | error

# Positional parameters won't be set:
# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01
A|$1 |
A|${1} |
A|${1:+bbb} |
A|${1:-bbb} | bbb
A|$2 |
A|${2} |
A|${2:+bbb} |
A|${2:-bbb} | bbb
A|$3 |
A|${3} |
A|${3:+bbb} |
A|${3:-bbb} | bbb
A|$4 |
A|${4} |
A|${4:+bbb} |
A|${4:-bbb} | bbb
A|$5 |
A|${5} |
A|${5:+bbb} |
A|${5:-bbb} | bbb
A|$6 |
A|${6} |
A|${6:+bbb} |
A|${6:-bbb} | bbb
A|$7 |
A|${7} |
A|${7:+bbb} |
A|${7:-bbb} | bbb
A|$8 |
A|${8} |
A|${8:+bbb} |
A|${8:-bbb} | bbb
A|$9 |
A|${9} |
A|${9:+bbb} |
A|${9:-bbb} | bbb
A|$999 |
A|${999} |
A|${999:+bbb} |
A|${999:-bbb} | bbb
A|$999aaa | aaa
A|${999}aaa | aaa
A|${999:+bbb}aaa | aaa
A|${999:-bbb}aaa | bbbaaa
A|$001 |
A|${001} |
A|${001:+bbb} |
A|${001:-bbb} | bbb
A|$001aaa | aaa
A|${001}aaa | aaa
A|${001:+bbb}aaa | aaa
A|${001:-bbb}aaa | bbbaaa

# Special parameters won't be set in the Dockerfile:
# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
A|$@ |
A|${@} |
A|${@:+bbb} |
A|${@:-bbb} | bbb
A|$@@@ | @@
A|$@aaa | aaa
A|${@}aaa | aaa
A|${@:+bbb}aaa | aaa
A|${@:-bbb}aaa | bbbaaa
A|$* |
A|${*} |
A|${*:+bbb} |
A|${*:-bbb} | bbb
A|$# |
A|${#} |
A|${#:+bbb} |
A|${#:-bbb} | bbb
A|$? |
A|${?} |
A|${?:+bbb} |
A|${?:-bbb} | bbb
A|$- |
A|${-} |
A|${-:+bbb} |
A|${-:-bbb} | bbb
A|$$ |
A|${$} |
A|${$:+bbb} |
A|${$:-bbb} | bbb
A|$! |
A|${!} |
A|${!:+bbb} |
A|${!:-bbb} | bbb
A|$0 |
A|${0} |
A|${0:+bbb} |
A|${0:-bbb} | bbb
30 vendor/github.com/docker/docker/builder/dockerfile/shell/wordsTest generated vendored Normal file
@ -0,0 +1,30 @@
hello | hello
hello${hi}bye | hellobye
ENV hi=hi
hello${hi}bye | hellohibye
ENV space=abc def
hello${space}bye | helloabc,defbye
hello"${space}"bye | helloabc defbye
hello "${space}"bye | hello,abc defbye
ENV leading= ab c
hello${leading}def | hello,ab,cdef
hello"${leading}" def | hello ab c,def
hello"${leading}" | hello ab c
hello${leading} | hello,ab,c
# next line MUST have 3 trailing spaces, don't erase them!
ENV trailing=ab c   
hello${trailing} | helloab,c
hello${trailing}d | helloab,c,d
hello"${trailing}"d | helloab c d
# next line MUST have 3 trailing spaces, don't erase them!
hel"lo${trailing}" | helloab c
hello" there " | hello there
hello there | hello,there
hello\ there | hello there
hello" there | error
hello\" there | hello",there
hello"\\there" | hello\there
hello"\there" | hello\there
hello'\\there' | hello\\there
hello'\there' | hello\there
hello'$there' | hello$there
@ -0,0 +1,7 @@
syntax = "proto3";

package remotecontext; // no namespace because only used internally

message TarsumBackup {
	map<string, string> Hashes = 1;
}
@ -0,0 +1,35 @@
# Go client for the Docker Engine API

The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.

For example, to list running containers (the equivalent of `docker ps`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	if err != nil {
		panic(err)
	}

	for _, container := range containers {
		fmt.Printf("%s %s\n", container.ID[:10], container.Image)
	}
}
```

[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
@ -1,22 +0,0 @@
Copyright (c) 2013 Honza Pokorny
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Binary file not shown.
@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -e

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."

# see also ".mailmap" for how email addresses and names are deduplicated

{
	cat <<-'EOH'
	# This file lists all individuals having contributed content to the repository.
	# For how it is generated, see `hack/generate-authors.sh`.
	EOH
	echo
	git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
} > AUTHORS
@ -1 +0,0 @@
../../../integration/testdata/https/ca.pem
@ -1 +0,0 @@
../../../integration/testdata/https/client-cert.pem
@ -1 +0,0 @@
../../../integration/testdata/https/client-key.pem
@ -1 +0,0 @@
../../../integration/testdata/https/server-cert.pem
@ -1 +0,0 @@
../../../integration/testdata/https/server-key.pem
@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.
@ -1,97 +0,0 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/docker/docker/pkg/archive"
	"github.com/sirupsen/logrus"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
@ -0,0 +1,5 @@
# reexec

The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
because of the forking limitations of using Go. Handlers can be registered with a name, and the
argv[0] of the exec'd binary will be used to find and execute custom init paths.

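A minimal, hedged sketch of that flow, assuming the package's exported `Register`, `Init`, and `Command` helpers:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register a handler under a name; when the binary is re-exec'd with
	// argv[0] == "child", this function runs instead of main.
	reexec.Register("child", func() {
		fmt.Println("running as re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	// Init returns true when argv[0] matched a registered handler,
	// in which case the handler has already run.
	if reexec.Init() {
		return
	}

	// Parent path: spawn ourselves again under the registered name.
	cmd := reexec.Command("child")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```
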
@ -0,0 +1 @@
This package provides helper functions for dealing with signals across various operating systems

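A hedged one-line usage sketch, assuming the package's `ParseSignal` helper for resolving a signal by name in a platform-aware way:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	// Resolve a signal by name ("KILL" or "SIGKILL") or by number.
	sig, err := signal.ParseSignal("KILL")
	if err != nil {
		panic(err)
	}
	fmt.Printf("SIGKILL resolves to %d\n", sig)
}
```
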
@ -0,0 +1 @@
This package provides helper functions for dealing with string identifiers

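For concreteness, a hedged sketch using the package's `GenerateRandomID` and `TruncateID` helpers (the 64-character hex ID and its 12-character short form used throughout Docker):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	// Generate a full-length hex ID and print its truncated short form.
	id := stringid.GenerateRandomID()
	fmt.Println(id)
	fmt.Println(stringid.TruncateID(id))
}
```
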
@ -0,0 +1,6 @@
Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks,
as well as a Windows long-path aware version of filepath.EvalSymlinks
from the [Go standard library](https://golang.org/pkg/path/filepath).

The code from filepath.EvalSymlinks has been adapted in fs.go.
Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.

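A hedged sketch of the scoped evaluation, via the package's exported `FollowSymlinkInScope` wrapper (the paths below are placeholders):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Resolve symlinks in the path without ever escaping the root scope;
	// a link pointing at /etc/passwd is re-rooted under /rootfs.
	resolved, err := symlink.FollowSymlinkInScope("/rootfs/tmp/link", "/rootfs")
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved)
}
```
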
@ -0,0 +1,230 @@
page_title: TarSum checksum specification
page_description: Documentation for algorithms used in the TarSum checksum calculation
page_keywords: docker, checksum, validation, tarsum

# TarSum Checksum Specification

## Abstract

This document describes the algorithms used in performing the TarSum checksum
calculation on filesystem layers, the need for this method over existing
methods, and the versioning of this calculation.

## Warning

This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.

This is _not_ a cryptographic attestation, and should not be considered secure.

## Introduction

Within Docker, filesystems are transported as tar(1) archives. There are a
variety of tar serialization formats [2], and a key concern here is ensuring a
repeatable checksum given a set of inputs from a generic tar archive. Types of
transportation include distribution to and from a registry endpoint, saving and
loading through commands or Docker daemon APIs, transferring the build context
from client to Docker daemon, and committing the filesystem of a container to
become an image.

As tar archives are used for transit, but not preserved in many situations, the
focus of the algorithm is to ensure the integrity of the preserved filesystem,
while maintaining a deterministic accountability. It neither constrains the
ordering or manipulation of the files during the creation or unpacking of the
archive, nor includes additional metadata about the filesystem attributes.

## Intended Audience

This document outlines the methods used for consistent checksum calculation
for filesystems transported via tar archives.

Auditing these methodologies is an open and iterative process. This document
should accommodate the review of source code. Ultimately, this document should
be the starting point of further refinements to the algorithm and its future
versions.

## Concept

The checksum mechanism must ensure the integrity and assurance of the
filesystem payload.

## Checksum Algorithm Profile

A checksum mechanism must define the following operations and attributes:

* Associated hashing cipher - used to checksum each file payload and attribute
  information.
* Checksum list - each file of the filesystem archive has its checksum
  calculated from the payload and attributes of the file. The final checksum is
  calculated from this list, with specific ordering.
* Version - as the algorithm adapts to requirements, there are behaviors of the
  algorithm to manage by versioning.
* Archive being calculated - the tar archive having its checksum calculated

## Elements of TarSum checksum

The calculated sum output is a text string. The elements included in the output
of the calculated sum comprise the information needed for validation of the sum
(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
form.

There are two delimiters used:
* '+' separates TarSum version from hashing cipher
* ':' separates calculation mechanics from expected hash

Example:

```
"tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
|         |       \                                                               |
|         |        \                                                              |
|_version_|_cipher__|__                                                           |
|                      \                                                          |
|_calculation_mechanics_|______________________expected_sum______________________|
```

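Since the format is just two delimited fields plus a hex digest, a small parser illustrates it. This is a hedged sketch, not code from the tarsum package:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTarSum splits "tarsum.v1+sha256:<hex>" into its three elements
// using the two delimiters described above.
func parseTarSum(s string) (version, cipher, sum string, err error) {
	parts := strings.SplitN(s, ":", 2) // ':' splits mechanics from expected hash
	if len(parts) != 2 {
		return "", "", "", fmt.Errorf("missing ':' in %q", s)
	}
	mech := strings.SplitN(parts[0], "+", 2) // '+' splits version from cipher
	if len(mech) != 2 {
		return "", "", "", fmt.Errorf("missing '+' in %q", s)
	}
	return mech[0], mech[1], parts[1], nil
}

func main() {
	v, c, sum, err := parseTarSum("tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e")
	if err != nil {
		panic(err)
	}
	fmt.Println(v, c, len(sum)) // tarsum.v1 sha256 64
}
```
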
## Versioning

Versioning was introduced [0] to accommodate differences in calculation needed,
and the ability to maintain backward compatibility.

The general algorithm is described further in the 'Calculation' section.

### Version0

This is the initial version of TarSum.

Its element in the TarSum checksum string is `tarsum`.

### Version1

Its element in the TarSum checksum is `tarsum.v1`.

The notable changes in this version:
* Exclusion of file `mtime` from the file information headers, in each file
  checksum calculation
* Inclusion of extended attributes (`xattrs`, also seen as `SCHILY.xattr.` prefixed Pax
  tar file info headers) keys and values in each file checksum calculation

### VersionDev

*Do not use unless validating refinements to the checksum algorithm*

Its element in the TarSum checksum is `tarsum.dev`.

This is a floating placeholder for a next version and grounds for testing
changes. The methods used for calculation are subject to change without notice,
and this version is for testing and not for production use.

## Ciphers

The official default and standard hashing cipher used in the calculation mechanic
is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.

Though the TarSum algorithm itself is not exclusively bound to the single
hashing cipher `sha256`, support for alternate hashing ciphers was later added
[1]. Use cases for an alternate cipher could include future-proofing the TarSum
checksum format and using faster cipher hashes for tar filesystem checksums.

## Calculation

### Requirement

As mentioned earlier, the calculation takes into consideration the lifecycle of
the tar archive: the tar archive is not an immutable, permanent artifact.
Otherwise, options like relying on a known hashing cipher checksum of the
archive itself would be reliable enough. The tar archive of the filesystem is
used as a transportation medium for Docker images, and the archive is discarded
once its contents are extracted. Therefore, items such as the order of files in
the tar archive and timestamps are subject to change once an image is received,
and cannot be relied on for consistent validation.

### Process

The method is typically iterative due to reading tar info headers from the
archive stream, though this is not a strict requirement.

#### Files

Each file in the tar archive has its contents (headers and body) checksummed
individually using the designated associated hashing cipher. The ordered
headers of the file are written to the checksum calculation first, and then the
payload of the file body.

The resulting checksum of the file is appended to the list of file sums. The
sum is encoded as a string of the hexadecimal digest. Additionally, the file
name and position in the archive are kept as a reference for special ordering.

#### Headers

The following headers are read, in this
order (with the corresponding representation of each value):
* 'name' - string
* 'mode' - string of the base10 integer
* 'uid' - string of the integer
* 'gid' - string of the integer
* 'size' - string of the integer
* 'mtime' (_Version0 only_) - string of the integer seconds since 1970-01-01 00:00:00 UTC
* 'typeflag' - string of the char
* 'linkname' - string
* 'uname' - string
* 'gname' - string
* 'devmajor' - string of the integer
* 'devminor' - string of the integer

For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
headers) are included after the above list. These xattr key/value pairs are
first sorted by key.

#### Header Format

The ordered headers are written to the hash in the format of

    "{.key}{.value}"

with no newline.

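As an illustration only (not code from the tarsum package), a hedged Go sketch of writing the ordered header key/value pairs and the body into a per-file sum:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// fileHeaderSum shows the "{.key}{.value}" serialization from the spec:
// ordered pairs written to the hash with no newlines, followed by the
// file body. Header selection is simplified for the sketch.
func fileHeaderSum(headers [][2]string, body []byte) string {
	h := sha256.New()
	for _, kv := range headers {
		h.Write([]byte(kv[0])) // {.key}
		h.Write([]byte(kv[1])) // {.value}
	}
	h.Write(body)
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	headers := [][2]string{
		{"name", "etc/hostname"},
		{"mode", "420"}, // base10 of 0644
		{"uid", "0"},
		{"gid", "0"},
		{"size", "6"},
		{"typeflag", "0"},
		{"linkname", ""},
		{"uname", "root"},
		{"gname", "root"},
		{"devmajor", "0"},
		{"devminor", "0"},
	}
	fmt.Println(fileHeaderSum(headers, []byte("myhost")))
}
```
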
#### Body

After the ordered headers of the file have been added to the checksum for the
file, the body of the file is written to the hash.

#### List of file sums

The list of file sums is sorted by the string of the hexadecimal digest.

If there are two files in the tar with matching paths, the order of occurrence
for that path is reflected for the sums of the corresponding file header and
body.

#### Final Checksum

Begin with a fresh or initial state of the associated hash cipher. If there is
additional payload to include in the TarSum calculation for the archive, it is
written first. Then each checksum from the ordered list of file sums is written
to the hash.

The resulting digest is formatted per the Elements of TarSum checksum,
including the TarSum version, the associated hash cipher and the hexadecimal
encoded checksum digest.

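Again as a hedged sketch rather than the package's implementation, the final step reduces to sorting the per-file sums and hashing them in order:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// finalTarSum sketches the "Final Checksum" step: sort the per-file hex
// sums, write any extra payload first, then each sum, and format the
// result as version+cipher:digest.
func finalTarSum(fileSums []string, extraPayload []byte) string {
	sorted := append([]string(nil), fileSums...)
	sort.Strings(sorted) // list of file sums is sorted by hex digest

	h := sha256.New()
	h.Write(extraPayload) // additional payload, if any, goes first
	for _, s := range sorted {
		h.Write([]byte(s))
	}
	return fmt.Sprintf("tarsum.v1+sha256:%x", h.Sum(nil))
}

func main() {
	fmt.Println(finalTarSum([]string{"ffee01", "aabb02"}, nil))
}
```
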
## Security Considerations

The initial version of TarSum has undergone one update that could invalidate
handcrafted tar archives. The tar archive format supports appending of files
with same names as prior files in the archive. The latter file will clobber the
prior file of the same path. Due to this, the algorithm now accounts for files
with matching paths, and orders the list of file sums accordingly [3].

## Footnotes

* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31

## Acknowledgments

Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial work of the
TarSum calculation.

@ -0,0 +1 @@
This package provides helper functions to pack version information into a single User-Agent header.

@ -1 +0,0 @@
../CONTRIBUTING.md

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

@ -0,0 +1,70 @@
# Contributing to Docker open source projects

Want to hack on go-events? Awesome! Here are instructions to get you started.

go-events is part of the [Docker](https://www.docker.com) project, and
follows the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.

Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).

For an in-depth description of our contribution process, visit the
contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

@ -0,0 +1,46 @@
# go-events maintainers file
#
# This file describes who runs the docker/go-events project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
	[Org."Core maintainers"]
		people = [
			"aaronlehmann",
			"aluzzardi",
			"lk4d4",
			"stevvooe",
		]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.aaronlehmann]
	Name = "Aaron Lehmann"
	Email = "aaron.lehmann@docker.com"
	GitHub = "aaronlehmann"

	[people.aluzzardi]
	Name = "Andrea Luzzardi"
	Email = "al@docker.com"
	GitHub = "aluzzardi"

	[people.lk4d4]
	Name = "Alexander Morozov"
	Email = "lk4d4@docker.com"
	GitHub = "lk4d4"

	[people.stevvooe]
	Name = "Stephen Day"
	Email = "stephen.day@docker.com"
	GitHub = "stevvooe"

@ -0,0 +1,117 @@
# Docker Events Package

[GoDoc](https://godoc.org/github.com/docker/go-events)
[Circle CI](https://circleci.com/gh/docker/go-events)

The Docker `events` package implements a composable event distribution package
for Go.

Originally created to implement the [notifications in Docker Registry
2](https://github.com/docker/distribution/blob/master/docs/notifications.md),
we've found the pattern to be useful in other applications. This package is
most of the same code with slightly updated interfaces. Much of the internals
have been made available.

## Usage

The `events` package centers around a `Sink` type. Events are written with
calls to `Sink.Write(event Event)`. Sinks can be wired up in various
configurations to achieve interesting behavior.

The canonical example is that employed by the
[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
package. Let's say we have a type `httpSink` where we'd like to queue
notifications. As a rule, it should send a single http request and return an
error if it fails:

```go
func (h *httpSink) Write(event Event) error {
	p, err := json.Marshal(event)
	if err != nil {
		return err
	}
	body := bytes.NewReader(p)
	resp, err := h.client.Post(h.url, "application/json", body)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return errors.New("unexpected status")
	}

	return nil
}

// implement (*httpSink).Close()
```

With just that, we can start using components from this package. One can call
`(*httpSink).Write` to send events as the body of a post request to a
configured URL.

### Retries

HTTP can be unreliable. The first feature we'd like is to have some retry:

```go
hs := newHTTPSink(/*...*/)
retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
```

We now have a sink that will retry events against the `httpSink` until they
succeed. The retry will backoff for one second after 5 consecutive failures
using the breaker strategy.

### Queues

This isn't quite enough. We want a sink that doesn't block while we are
waiting for events to be sent. Let's add a `Queue`:

```go
queue := NewQueue(retry)
```

Now, we have an unbounded queue that will work through all events sent with
`(*Queue).Write`. Events can be added asynchronously to the queue without
blocking the current execution path. This is ideal for use in an http request.

### Broadcast

It usually turns out that you want to send to more than one listener. We can
use `Broadcaster` to support this:

```go
var broadcast = NewBroadcaster() // make it available somewhere in your application.
broadcast.Add(queue)  // add your queue!
broadcast.Add(queue2) // and another!
```

With the above, we can now call `broadcast.Write` in our http handlers and have
all the events distributed to each queue. Because the events are queued, no
listener blocks another.

### Extending

For the most part, the above is sufficient for a lot of applications. However,
extending the above functionality can be done by implementing your own `Sink`.
The behavior and semantics of the sink can be completely dependent on the
application requirements. The interface is provided below for reference:

```go
type Sink interface {
	Write(Event) error
	Close() error
}
```

Application behavior can be controlled by how `Write` behaves. The examples
above are designed to queue the message and return as quickly as possible.
Other implementations may block until the event is committed to durable
storage.

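As an illustration of that interface, here is a hedged sketch of a custom filtering `Sink` (the package itself ships a similar `Filter` type; this only shows the shape of an implementation):

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

// filterSink forwards only the events accepted by matcher; it shows the
// two methods any Sink must implement.
type filterSink struct {
	dst     events.Sink
	matcher func(events.Event) bool
}

func (f *filterSink) Write(event events.Event) error {
	if !f.matcher(event) {
		return nil // drop non-matching events silently
	}
	return f.dst.Write(event)
}

func (f *filterSink) Close() error { return f.dst.Close() }

func main() {
	// Wire the filter in front of a broadcaster, as with the queue above.
	sink := &filterSink{
		dst:     events.NewBroadcaster(),
		matcher: func(e events.Event) bool { return e != nil },
	}
	defer sink.Close()
	if err := sink.Write("hello"); err != nil {
		fmt.Println("write failed:", err)
	}
}
```
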
## Copyright and license

Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
Version 2.0. See [LICENSE](LICENSE) for the full license text.

@ -0,0 +1,55 @@
# Contributing

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

@ -0,0 +1,91 @@
# go-metrics [GoDoc](https://godoc.org/github.com/docker/go-metrics)

This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.

## Best Practices

This package is meant to be used for collecting metrics in Docker projects.
It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).

The following are a few Docker specific rules that will help you name and work with metrics in your project.

1. Namespace and Subsystem

This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.

```go
ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
	"version": dockerversion.Version,
	"commit":  dockerversion.GitCommit,
})
```

In the example above we are creating metrics for the Docker engine's daemon package.
`engine` would be the namespace in this example, and `daemon` is the subsystem or package where we are collecting the metrics.

A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.

2. Declaring your Metrics

Try to keep all your metric declarations in one file.
This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.

3. Use labels instead of multiple metrics

Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
If we wanted to collect timings on various container actions such as create, start, and delete, then we can define one metric called `container_actions` and use labels to specify the type of action.

```go
containerActions = ns.NewLabeledTimer("container_actions", "The number of milliseconds it takes to process each container action", "action")
```

The last parameter is the label name or key.
When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.

```go
containerActions.WithValues("create").UpdateSince(start)
```

4. Always use a unit

The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
For a timer, the standard unit is seconds and a counter's standard unit is a total.
For gauges you must provide the unit.
This package provides a standard set of units for use within the Docker projects.

```go
Nanoseconds Unit = "nanoseconds"
Seconds     Unit = "seconds"
Bytes       Unit = "bytes"
Total       Unit = "total"
```

If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.

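A hedged end-to-end sketch tying these rules together, assuming the package's `NewNamespace`, `NewGauge`, `Register`, and `Handler` helpers (metric names here are placeholders):

```go
package main

import (
	"net/http"

	metrics "github.com/docker/go-metrics"
)

func main() {
	ns := metrics.NewNamespace("engine", "daemon", nil)

	// A gauge must declare its unit explicitly (bytes in this case).
	freeMemory := ns.NewGauge("free_memory", "Amount of free memory", metrics.Bytes)
	metrics.Register(ns)

	freeMemory.Set(42 * 1024 * 1024)

	// Expose everything registered above for prometheus to scrape.
	http.Handle("/metrics", metrics.Handler())
	http.ListenAndServe(":8080", nil)
}
```
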
## Docs

Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).

## HTTP Metrics

To instrument a http handler, you can wrap the code like this:

```go
namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
httpMetrics := namespace.NewDefaultHttpMetrics()
metrics.Register(namespace)
instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
```

Note: The `handler` label must be provided when a new namespace is created.

## Additional Metrics

Additional metrics are also defined here that are not available in the prometheus client.
If you need a custom metric and it is generic enough to be used by multiple projects, define it here.

## Copyright and license

Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

@ -0,0 +1,67 @@
# Contributing to go-units

Want to hack on go-units? Awesome! Here are instructions to get you started.

go-units is a part of the [Docker](https://www.docker.com) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.

Otherwise, go read Docker's
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).

### Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions.)

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.

@ -0,0 +1,46 @@
# go-units maintainers file
#
# This file describes who runs the docker/go-units project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
	[Org."Core maintainers"]
		people = [
			"akihirosuda",
			"dnephin",
			"thajeztah",
			"vdemeester",
		]

[people]

# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.

	# ADD YOURSELF HERE IN ALPHABETICAL ORDER

	[people.akihirosuda]
	Name = "Akihiro Suda"
	Email = "suda.akihiro@lab.ntt.co.jp"
	GitHub = "AkihiroSuda"

	[people.dnephin]
	Name = "Daniel Nephin"
	Email = "dnephin@gmail.com"
	GitHub = "dnephin"

	[people.thajeztah]
	Name = "Sebastiaan van Stijn"
	Email = "github@gone.nl"
	GitHub = "thaJeztah"

	[people.vdemeester]
	Name = "Vincent Demeester"
	Email = "vincent@sbr.pm"
	GitHub = "vdemeester"

@ -0,0 +1,16 @@
[GoDoc](https://godoc.org/github.com/docker/go-units)

# Introduction

go-units is a library to transform human-friendly measurements into machine-friendly values.

## Usage

See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.

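For a quick feel of the API, here is a hedged sketch using the library's `HumanSize`, `FromHumanSize`, and `RAMInBytes` helpers:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// Machine-friendly -> human-friendly (decimal, 1000-based units).
	fmt.Println(units.HumanSize(1000000)) // "1MB"

	// Human-friendly -> machine-friendly.
	n, err := units.FromHumanSize("42MB")
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 42000000

	// Binary (1024-based) parsing, as used for RAM limits.
	ram, _ := units.RAMInBytes("64m")
	fmt.Println(ram) // 67108864
}
```
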
## Copyright and license

Copyright © 2015 Docker, Inc.

go-units is licensed under the Apache License, Version 2.0.
See [LICENSE](LICENSE) for the full text of the license.

@ -0,0 +1,11 @@
dependencies:
  post:
    # install golint
    - go get github.com/golang/lint/golint

test:
  pre:
    # run analysis before tests
    - go vet ./...
    - test -z "$(golint ./... | tee /dev/stderr)"
    - test -z "$(gofmt -s -l . | tee /dev/stderr)"

@ -0,0 +1,24 @@
### Notice

Do not change .pb.go files directly. You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files.
```
$ make generate
```

Click [here](https://github.com/google/protobuf) for more information about protobuf.

The `api.pb.txt` file contains merged descriptors of all defined services and messages.
Definitions present here are considered frozen after the release.

At release time, the current `api.pb.txt` file will be moved into place to
freeze the API changes for the minor version. For example, when 1.0.0 is
released, `api.pb.txt` should be moved to `1.0.txt`. Notice that we leave off
the patch number, since the API will be completely locked down for a given
patch series.

We may find that by default, protobuf descriptors are too noisy to lock down
API changes. In that case, we may filter out certain fields in the descriptors,
possibly regenerating for old versions.

This process is similar to the [process used to ensure backwards compatibility
in Go](https://github.com/golang/go/tree/master/api).

File diff suppressed because it is too large

@ -0,0 +1,72 @@
syntax = "proto3";

package docker.swarmkit.v1;

import "github.com/docker/swarmkit/api/types.proto";
import "github.com/docker/swarmkit/api/specs.proto";
import "gogoproto/gogo.proto";
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";

// CA defines the RPC methods for requesting certificates from a CA.
service CA {
	rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) {
		option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
	};
	// GetUnlockKey returns the current unlock key for the cluster for the role of the client
	// asking.
	rpc GetUnlockKey(GetUnlockKeyRequest) returns (GetUnlockKeyResponse) {
		option (docker.protobuf.plugin.tls_authorization) = { roles: ["swarm-manager"] };
	};
}

service NodeCA {
	rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) {
		option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
	};
	rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) {
		option (docker.protobuf.plugin.tls_authorization) = { insecure: true };
	};
}

message NodeCertificateStatusRequest {
	string node_id = 1;
}

message NodeCertificateStatusResponse {
	IssuanceStatus status = 1;
	Certificate certificate = 2;
}

message IssueNodeCertificateRequest {
	// DEPRECATED: Role is now selected based on which secret is matched.
	NodeRole role = 1 [deprecated=true];

	// CSR is the certificate signing request.
	bytes csr = 2 [(gogoproto.customname) = "CSR"];

	// Token represents a user-provided string that is necessary for new
	// nodes to join the cluster
	string token = 3;

	// Availability allows a user to control the current scheduling status of a node
	NodeSpec.Availability availability = 4;
}

message IssueNodeCertificateResponse {
	string node_id = 1;
	NodeSpec.Membership node_membership = 2;
}

message GetRootCACertificateRequest {}

message GetRootCACertificateResponse {
	bytes certificate = 1;
}

message GetUnlockKeyRequest {}

message GetUnlockKeyResponse {
	bytes unlock_key = 1;
	Version version = 2 [(gogoproto.nullable) = false];
}

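For orientation, a hedged Go sketch of calling the insecure bootstrap RPC above, assuming the client generated from this file by the standard (gogo)protobuf gRPC plugin and vendored as github.com/docker/swarmkit/api (the manager address is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// GetRootCACertificate is marked insecure: true, so no TLS identity
	// is needed for this one call during node bootstrap.
	conn, err := grpc.Dial("manager:2377", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	resp, err := api.NewCAClient(conn).GetRootCACertificate(
		context.Background(), &api.GetRootCACertificateRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("root CA certificate: %d bytes\n", len(resp.Certificate))
}
```
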
@ -0,0 +1,554 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package docker.swarmkit.v1;
|
||||
|
||||
import "github.com/docker/swarmkit/api/specs.proto";
|
||||
import "github.com/docker/swarmkit/api/objects.proto";
|
||||
import "github.com/docker/swarmkit/api/types.proto";
|
||||
import "gogoproto/gogo.proto";
|
||||
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
|
||||
|
||||
// Control defines the RPC methods for controlling a cluster.
|
||||
service Control {
|
||||
rpc GetNode(GetNodeRequest) returns (GetNodeResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
rpc GetTask(GetTaskRequest) returns (GetTaskResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
rpc GetService(GetServiceRequest) returns (GetServiceResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc ListServices(ListServicesRequest) returns (ListServicesResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// --- secret APIs ---
|
||||
|
||||
// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
|
||||
// id as `GetSecretRequest.SecretID`
|
||||
// - Returns `NotFound` if the Secret with the given id is not found.
|
||||
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
|
||||
// - Returns an error if getting fails.
|
||||
rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
|
||||
// UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same
|
||||
// id as `GetSecretRequest.SecretID`
|
||||
// - Returns `NotFound` if the Secret with the given id is not found.
|
||||
// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
|
||||
// - Returns an error if updating fails.
|
||||
rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being
|
||||
// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
|
||||
// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
|
||||
// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
|
||||
// - Returns an error if listing fails.
|
||||
rpc ListSecrets(ListSecretsRequest) returns (ListSecretsResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
// CreateSecret creates and return a `CreateSecretResponse` with a `Secret` based
|
||||
// on the provided `CreateSecretRequest.SecretSpec`.
|
||||
// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed,
|
||||
// or if the secret data is too long or contains invalid characters.
|
||||
// - Returns an error if the creation fails.
|
||||
rpc CreateSecret(CreateSecretRequest) returns (CreateSecretResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
|
||||
// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`.
|
||||
// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
|
||||
// - Returns `NotFound` if the a secret named `RemoveSecretRequest.ID` is not found.
|
||||
// - Returns an error if the deletion fails.
|
||||
rpc RemoveSecret(RemoveSecretRequest) returns (RemoveSecretResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
|
||||
// --- config APIs ---
|
||||
|
||||
// GetConfig returns a `GetConfigResponse` with a `Config` with the same
|
||||
// id as `GetConfigRequest.ConfigID`
|
||||
// - Returns `NotFound` if the Config with the given id is not found.
|
||||
// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
|
||||
// - Returns an error if getting fails.
|
||||
rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
|
||||
// UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same
|
||||
// id as `GetConfigRequest.ConfigID`
|
||||
// - Returns `NotFound` if the Config with the given id is not found.
|
||||
// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
|
||||
// - Returns an error if updating fails.
|
||||
rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// ListConfigs returns a `ListConfigResponse` with a list of `Config`s being
|
||||
// managed, or all configs matching any name in `ListConfigsRequest.Names`, any
|
||||
// name prefix in `ListConfigsRequest.NamePrefixes`, any id in
|
||||
// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`.
|
||||
// - Returns an error if listing fails.
|
||||
rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
// CreateConfig creates and return a `CreateConfigResponse` with a `Config` based
|
||||
// on the provided `CreateConfigRequest.ConfigSpec`.
|
||||
// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed,
|
||||
// or if the config data is too long or contains invalid characters.
|
||||
// - Returns an error if the creation fails.
|
||||
rpc CreateConfig(CreateConfigRequest) returns (CreateConfigResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
|
||||
// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`.
|
||||
// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty.
|
||||
// - Returns `NotFound` if the a config named `RemoveConfigRequest.ID` is not found.
|
||||
// - Returns an error if the deletion fails.
|
||||
rpc RemoveConfig(RemoveConfigRequest) returns (RemoveConfigResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
}
|
||||
}
|
||||
|
||||
message GetNodeRequest {
|
||||
string node_id = 1;
|
||||
}
|
||||
|
||||
message GetNodeResponse {
|
||||
Node node = 1;
|
||||
}
|
||||
|
||||
message ListNodesRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
repeated NodeSpec.Membership memberships = 4 [packed=false];
|
||||
repeated NodeRole roles = 5 [packed=false];
|
||||
// NamePrefixes matches all objects with the given prefixes
|
||||
repeated string name_prefixes = 6;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
|
||||
|
||||
message ListNodesResponse {
|
||||
repeated Node nodes = 1;
|
||||
}
|
||||
|
||||
// UpdateNodeRequest requests an update to the specified node. This may be used
|
||||
// to request a new availability for a node, such as PAUSE. Invalid updates
|
||||
// will be denied and cause an error.
|
||||
message UpdateNodeRequest {
|
||||
string node_id = 1;
|
||||
Version node_version = 2;
|
||||
NodeSpec spec = 3;
|
||||
}
|
||||
|
||||
message UpdateNodeResponse {
|
||||
Node node = 1;
|
||||
}
|
||||
|
||||
// RemoveNodeRequest requests to delete the specified node from store.
|
||||
message RemoveNodeRequest {
|
||||
string node_id = 1;
|
||||
bool force = 2;
|
||||
}
|
||||
|
||||
message RemoveNodeResponse {
|
||||
}
|
||||
|
||||
message GetTaskRequest {
|
||||
string task_id = 1;
|
||||
}
|
||||
|
||||
message GetTaskResponse {
|
||||
Task task = 1;
|
||||
}
|
||||
|
||||
message RemoveTaskRequest {
|
||||
string task_id = 1;
|
||||
}
|
||||
|
||||
message RemoveTaskResponse {
|
||||
}
|
||||
|
||||
message ListTasksRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
repeated string service_ids = 4;
|
||||
repeated string node_ids = 5;
|
||||
repeated docker.swarmkit.v1.TaskState desired_states = 6 [packed=false];
|
||||
// NamePrefixes matches all objects with the given prefixes
|
||||
repeated string name_prefixes = 7;
|
||||
repeated string runtimes = 9;
|
||||
|
||||
// UpToDate matches tasks that are consistent with the current
|
||||
// service definition.
|
||||
// Note: this is intended for internal status reporting rather
|
||||
// than being exposed to users. It may be removed in the future.
|
||||
bool up_to_date = 8;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
|
||||
|
||||
message ListTasksResponse {
|
||||
repeated Task tasks = 1;
|
||||
}
|
||||
|
||||
message CreateServiceRequest {
|
||||
ServiceSpec spec = 1;
|
||||
}
|
||||
|
||||
message CreateServiceResponse {
|
||||
Service service = 1;
|
||||
}
|
||||
|
||||
message GetServiceRequest {
|
||||
string service_id = 1;
|
||||
bool insert_defaults = 2;
|
||||
}
|
||||
|
||||
message GetServiceResponse {
|
||||
Service service = 1;
|
||||
}
|
||||
|
||||
message UpdateServiceRequest {
|
||||
string service_id = 1;
|
||||
Version service_version = 2;
|
||||
ServiceSpec spec = 3;
|
||||
|
||||
enum Rollback {
|
||||
// This is not a rollback. The spec field of the request will
|
||||
// be honored.
|
||||
NONE = 0;
|
||||
|
||||
// Roll back the service - get spec from the service's
|
||||
// previous_spec.
|
||||
PREVIOUS = 1;
|
||||
}
|
||||
|
||||
// Rollback may be set to PREVIOUS to request a rollback (the service's
|
||||
// spec will be set to the value of its previous_spec field). In this
|
||||
// case, the spec field of this request is ignored.
|
||||
Rollback rollback = 4;
|
||||
}
|
||||
|
||||
message UpdateServiceResponse {
|
||||
Service service = 1;
|
||||
}
|
||||
|
||||
message RemoveServiceRequest {
|
||||
string service_id = 1;
|
||||
}
|
||||
|
||||
message RemoveServiceResponse {
|
||||
}
|
||||
|
||||
message ListServicesRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
// NamePrefixes matches all objects with the given prefixes
|
||||
repeated string name_prefixes = 4;
|
||||
repeated string runtimes = 5;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
|
||||
|
||||
message ListServicesResponse {
|
||||
repeated Service services = 1;
|
||||
}
|
||||
|
||||
message CreateNetworkRequest {
|
||||
NetworkSpec spec = 1;
|
||||
}
|
||||
|
||||
message CreateNetworkResponse {
|
||||
Network network = 1;
|
||||
}
|
||||
|
||||
message GetNetworkRequest {
|
||||
string name = 1;
|
||||
string network_id = 2;
|
||||
}
|
||||
|
||||
message GetNetworkResponse {
|
||||
Network network = 1;
|
||||
}
|
||||
|
||||
message RemoveNetworkRequest {
|
||||
string name = 1;
|
||||
string network_id = 2;
|
||||
}
|
||||
|
||||
message RemoveNetworkResponse {}
|
||||
|
||||
message ListNetworksRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
// NamePrefixes matches all objects with the given prefixes
|
||||
repeated string name_prefixes = 4;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
|
||||
|
||||
message ListNetworksResponse {
|
||||
repeated Network networks = 1;
|
||||
}
|
||||
|
||||
message GetClusterRequest {
|
||||
string cluster_id = 1;
|
||||
}
|
||||
|
||||
message GetClusterResponse {
|
||||
Cluster cluster = 1;
|
||||
}
|
||||
|
||||
message ListClustersRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
// NamePrefixes matches all objects with the given prefixes
|
||||
repeated string name_prefixes = 4;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
|
||||
|
||||
message ListClustersResponse {
|
||||
repeated Cluster clusters = 1;
|
||||
}
|
||||
|
||||
// KeyRotation tells UpdateCluster what items to rotate
|
||||
message KeyRotation {
|
||||
// WorkerJoinToken tells UpdateCluster to rotate the worker secret token.
|
||||
bool worker_join_token = 1;
|
||||
|
||||
// ManagerJoinToken tells UpdateCluster to rotate the manager secret token.
|
||||
bool manager_join_token = 2;
|
||||
|
||||
// ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key
|
||||
bool manager_unlock_key = 3;
|
||||
|
||||
}
|
||||
|
||||
message UpdateClusterRequest {
|
||||
// ClusterID is the cluster ID to update.
|
||||
string cluster_id = 1;
|
||||
|
||||
// ClusterVersion is the version of the cluster being updated.
|
||||
Version cluster_version = 2;
|
||||
|
||||
// Spec is the new spec to apply to the cluster.
|
||||
ClusterSpec spec = 3;
|
||||
|
||||
// Rotation contains flags for join token and unlock key rotation
|
||||
KeyRotation rotation = 4 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message UpdateClusterResponse {
|
||||
Cluster cluster = 1;
|
||||
}
|
||||
|
||||
// GetSecretRequest is the request to get a `Secret` object given a secret id.
|
||||
message GetSecretRequest {
|
||||
string secret_id = 1;
|
||||
}
|
||||
|
||||
// GetSecretResponse contains the Secret corresponding to the id in
|
||||
// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret`
|
||||
// object should be nil instead of actually containing the secret bytes.
|
||||
message GetSecretResponse {
|
||||
Secret secret = 1;
|
||||
}
|
||||
|
||||
message UpdateSecretRequest {
|
||||
// SecretID is the secret ID to update.
|
||||
string secret_id = 1;
|
||||
|
||||
// SecretVersion is the version of the secret being updated.
|
||||
Version secret_version = 2;
|
||||
|
||||
// Spec is the new spec to apply to the Secret
|
||||
// Only some fields are allowed to be updated.
|
||||
SecretSpec spec = 3;
|
||||
}
|
||||
|
||||
message UpdateSecretResponse {
|
||||
Secret secret = 1;
|
||||
}
|
||||
|
||||
// ListSecretRequest is the request to list all non-internal secrets in the secret store,
|
||||
// or all secrets filtered by (name or name prefix or id prefix) and labels.
|
||||
message ListSecretsRequest {
|
||||
message Filters {
|
||||
repeated string names = 1;
|
||||
repeated string id_prefixes = 2;
|
||||
map<string, string> labels = 3;
|
||||
repeated string name_prefixes = 4;
|
||||
}
|
||||
|
||||
Filters filters = 1;
|
||||
}
// ListSecretsResponse contains a list of all the secrets that match the name or
// name prefix filters provided in `ListSecretsRequest`. The `Secret.Spec.Data`
// field in each `Secret` object should be nil instead of actually containing
// the secret bytes.
message ListSecretsResponse {
	repeated Secret secrets = 1;
}

// CreateSecretRequest specifies a new secret (it will not update an existing
// secret) to create.
message CreateSecretRequest {
	SecretSpec spec = 1;
}

// CreateSecretResponse contains the newly created `Secret` corresponding to the
// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead
// of actually containing the secret bytes.
message CreateSecretResponse {
	Secret secret = 1;
}

// RemoveSecretRequest contains the ID of the secret that should be removed. This
// removes all versions of the secret.
message RemoveSecretRequest {
	string secret_id = 1;
}

// RemoveSecretResponse is an empty object indicating the successful removal of
// a secret.
message RemoveSecretResponse {}

// GetConfigRequest is the request to get a `Config` object given a config id.
message GetConfigRequest {
	string config_id = 1;
}

// GetConfigResponse contains the Config corresponding to the id in
// `GetConfigRequest`.
message GetConfigResponse {
	Config config = 1;
}

message UpdateConfigRequest {
	// ConfigID is the config ID to update.
	string config_id = 1;

	// ConfigVersion is the version of the config being updated.
	Version config_version = 2;

	// Spec is the new spec to apply to the Config.
	// Only some fields are allowed to be updated.
	ConfigSpec spec = 3;
}

message UpdateConfigResponse {
	Config config = 1;
}

// ListConfigsRequest is the request to list all configs in the config store,
// or all configs filtered by (name or name prefix or id prefix) and labels.
message ListConfigsRequest {
	message Filters {
		repeated string names = 1;
		repeated string id_prefixes = 2;
		map<string, string> labels = 3;
		repeated string name_prefixes = 4;
	}

	Filters filters = 1;
}

// ListConfigsResponse contains a list of all the configs that match the name or
// name prefix filters provided in `ListConfigsRequest`.
message ListConfigsResponse {
	repeated Config configs = 1;
}

// CreateConfigRequest specifies a new config (it will not update an existing
// config) to create.
message CreateConfigRequest {
	ConfigSpec spec = 1;
}

// CreateConfigResponse contains the newly created `Config` corresponding to the
// name in `CreateConfigRequest`.
message CreateConfigResponse {
	Config config = 1;
}

// RemoveConfigRequest contains the ID of the config that should be removed. This
// removes all versions of the config.
message RemoveConfigRequest {
	string config_id = 1;
}

// RemoveConfigResponse is an empty object indicating the successful removal of
// a config.
message RemoveConfigResponse {}
@@ -0,0 +1,218 @@
syntax = "proto3";
|
||||
|
||||
package docker.swarmkit.v1;
|
||||
|
||||
import "github.com/docker/swarmkit/api/types.proto";
|
||||
import "github.com/docker/swarmkit/api/objects.proto";
|
||||
import "gogoproto/gogo.proto";
|
||||
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
// Dispatcher is the API provided by a manager group for agents to connect to. Agents
|
||||
// connect to this service to receive task assignments and report status.
|
||||
//
|
||||
// API methods on this service are used only by agent nodes.
|
||||
service Dispatcher { // maybe dispatch, al likes this
|
||||
// Session starts an agent session with the dispatcher. The session is
|
||||
// started after the first SessionMessage is received.
|
||||
//
|
||||
// Once started, the agent is controlled with a stream of SessionMessage.
|
||||
// Agents should list on the stream at all times for instructions.
|
||||
rpc Session(SessionRequest) returns (stream SessionMessage) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// Heartbeat is heartbeat method for nodes. It returns new TTL in response.
|
||||
// Node should send new heartbeat earlier than now + TTL, otherwise it will
|
||||
// be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN
|
||||
rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// UpdateTaskStatus updates status of task. Node should send such updates
|
||||
// on every status change of its tasks.
|
||||
//
|
||||
// Whether receiving batch updates or single status updates, this method
|
||||
// should be accepting. Errors should only be returned if the entire update
|
||||
// should be retried, due to data loss or other problems.
|
||||
//
|
||||
// If a task is unknown the dispatcher, the status update should be
|
||||
// accepted regardless.
|
||||
rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
|
||||
};
|
||||
|
||||
// Tasks is a stream of tasks state for node. Each message contains full list
|
||||
// of tasks which should be run on node, if task is not present in that list,
|
||||
// it should be terminated.
|
||||
rpc Tasks(TasksRequest) returns (stream TasksMessage) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
|
||||
option deprecated = true;
|
||||
};
|
||||
|
||||
// Assignments is a stream of assignments such as tasks and secrets for node.
|
||||
// The first message in the stream contains all of the tasks and secrets
|
||||
// that are relevant to the node. Future messages in the stream are updates to
|
||||
// the set of assignments.
|
||||
rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
|
||||
};
|
||||
}
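A minimal sketch of an agent opening a Session stream against this service, assuming the generated Go bindings and an already-configured mTLS connection; `conn` and the logging are placeholders.

	package main

	import (
		"context"
		"log"

		"google.golang.org/grpc"

		"github.com/docker/swarmkit/api"
	)

	func runSession(ctx context.Context, conn *grpc.ClientConn) error {
		client := api.NewDispatcherClient(conn)

		// An empty SessionID requests a brand-new session; a previous ID may
		// be passed to attempt resumption.
		stream, err := client.Session(ctx, &api.SessionRequest{SessionID: ""})
		if err != nil {
			return err
		}

		for {
			msg, err := stream.Recv() // blocks until the dispatcher sends an instruction
			if err != nil {
				return err // the agent should re-register and start a new session
			}
			// The first message carries the allocated SessionID, which must be
			// echoed on all subsequent Dispatcher calls.
			log.Printf("session %s: %d candidate managers", msg.SessionID, len(msg.Managers))
		}
	}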
// SessionRequest starts a session.
message SessionRequest {
	NodeDescription description = 1;
	// SessionID can be provided to attempt resuming an existing session. If the
	// SessionID is empty or invalid, a new SessionID will be assigned.
	//
	// See SessionMessage.SessionID for details.
	string session_id = 2;
}

// SessionMessage instructs an agent on various actions as part of the current
// session. An agent should act immediately on the contents.
message SessionMessage {
	// SessionID is allocated after a successful registration. It should be
	// used on all RPC calls after registration. A dispatcher may choose to
	// change the SessionID, at which time an agent must re-register and obtain
	// a new one.
	//
	// All Dispatcher calls after register should include the SessionID. If the
	// Dispatcher so chooses, it may reject the call with an InvalidArgument
	// error code, at which time the agent should call Register to start a new
	// session.
	//
	// As a rule, once an agent has a SessionID, it should never save it to
	// disk or otherwise try to reuse it. If the agent loses its SessionID, it
	// must start a new session through a call to Register. A Dispatcher may
	// choose to reuse the SessionID, if it sees fit, but it is not advised.
	//
	// The actual implementation of the SessionID is Dispatcher specific and
	// should be treated as opaque by agents.
	//
	// From a Dispatcher perspective, there are many ways to use the SessionID
	// to ensure uniqueness of a set of client RPC calls. One method is to keep
	// the SessionID unique to every call to Register in a single Dispatcher
	// instance. This ensures that the SessionID represents the unique
	// session from a single Agent to Manager. If the Agent restarts, we
	// allocate a new session, since the restarted Agent is not aware of the
	// new SessionID.
	//
	// The most compelling use case is to support duplicate node detection. If
	// one clones a virtual machine, including certificate material, two nodes
	// may end up with the same identity. This can also happen if two identical
	// agent processes are coming from the same node. If the SessionID is
	// replicated through the cluster, we can immediately detect the condition
	// and address it.
	//
	// Extending from the case above, we can actually detect a compromised
	// identity. Coupled with provisions to rebuild node identity, we can ban
	// the compromised node identity and have the nodes re-authenticate and
	// build a new identity. At this time, an administrator can then
	// re-authorize the compromised nodes, if it was a mistake, or ensure that
	// a misbehaving node can no longer connect to the cluster.
	//
	// We considered placing this field in a gRPC header. Because this is a
	// critical feature of the protocol, we thought it should be represented
	// directly in the RPC message set.
	string session_id = 1;

	// Node identifies the registering node.
	Node node = 2;

	// Managers provides a weighted list of alternative dispatchers.
	repeated WeightedPeer managers = 3;

	// Symmetric encryption key distributed by the lead manager. Used by agents
	// for securing network bootstrapping and communication.
	repeated EncryptionKey network_bootstrap_keys = 4;

	// Which root certificates to trust.
	bytes RootCA = 5;
}
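A minimal sketch, assuming the same generated bindings, of the reaction the SessionID comment above prescribes when the dispatcher rejects a call with InvalidArgument: discard the old ID and start over. The sentinel error and the caller that re-opens the Session stream are hypothetical.

	package main

	import (
		"context"
		"errors"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"

		"github.com/docker/swarmkit/api"
	)

	// errSessionInvalid signals the caller to drop the SessionID and re-register.
	var errSessionInvalid = errors.New("session invalidated by dispatcher; re-register")

	func heartbeatOnce(ctx context.Context, client api.DispatcherClient, sessionID string) error {
		_, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID})
		if status.Code(err) == codes.InvalidArgument {
			// Per the comment above, the ID must never be reused; the agent
			// should open a fresh Session stream to obtain a new one.
			return errSessionInvalid
		}
		return err
	}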
// HeartbeatRequest provides identifying properties for a single heartbeat.
message HeartbeatRequest {
	string session_id = 1;
}

message HeartbeatResponse {
	// Period is the duration to wait before sending the next heartbeat.
	// Well-behaved agents should update this on every heartbeat round trip.
	google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
}
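A minimal sketch of the heartbeat loop this contract implies: wait for the period returned by each Heartbeat before sending the next one. With stdduration and nullable=false, the generated Period field is a plain time.Duration; the initial one-second interval is an arbitrary placeholder.

	package main

	import (
		"context"
		"time"

		"github.com/docker/swarmkit/api"
	)

	func heartbeatLoop(ctx context.Context, client api.DispatcherClient, sessionID string) error {
		period := time.Second // placeholder until the first response arrives
		for {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(period):
			}
			resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: sessionID})
			if err != nil {
				return err // missing the TTL leads to NodeStatus_DOWN, so surface errors
			}
			// Update the interval from the response, as the comment above advises.
			period = resp.Period
		}
	}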
message UpdateTaskStatusRequest {
	string session_id = 1;

	message TaskStatusUpdate {
		string task_id = 1;
		TaskStatus status = 2;
	}

	// Updates should contain all statuses for running tasks. Only the status
	// field must be set. The spec is not required.
	repeated TaskStatusUpdate updates = 3;
}
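A minimal sketch of batching task status changes into one UpdateTaskStatus call, per the contract above; the statuses map and field names from the generated bindings are assumptions to verify against the vendored package.

	package main

	import (
		"context"

		"github.com/docker/swarmkit/api"
	)

	func reportStatuses(ctx context.Context, client api.DispatcherClient, sessionID string, statuses map[string]*api.TaskStatus) error {
		req := &api.UpdateTaskStatusRequest{SessionID: sessionID}
		for taskID, st := range statuses {
			// Only the status is sent; the dispatcher does not need the spec.
			req.Updates = append(req.Updates, &api.UpdateTaskStatusRequest_TaskStatusUpdate{
				TaskID: taskID,
				Status: st,
			})
		}
		// An error means the entire batch should be retried; unknown tasks
		// are accepted regardless on the dispatcher side.
		_, err := client.UpdateTaskStatus(ctx, req)
		return err
	}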
message UpdateTaskStatusResponse {
	// void
}

message TasksRequest {
	string session_id = 1;
}

message TasksMessage {
	// Tasks is the set of tasks that should be running on the node.
	// Tasks outside of this set running on the node should be terminated.
	repeated Task tasks = 1;
}

message AssignmentsRequest {
	string session_id = 1;
}

message Assignment {
	oneof item {
		Task task = 1;
		Secret secret = 2;
		Config config = 3;
	}
}

message AssignmentChange {
	enum AssignmentAction {
		UPDATE = 0 [(gogoproto.enumvalue_customname) = "AssignmentActionUpdate"];
		REMOVE = 1 [(gogoproto.enumvalue_customname) = "AssignmentActionRemove"];
	}

	Assignment assignment = 1;
	AssignmentAction action = 2;
}
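A minimal sketch of dispatching on the Assignment oneof. The Get accessors and the customname-derived enum constants below come from the gogo-generated bindings and are assumptions to verify; the logging stands in for real reconciliation.

	package main

	import (
		"log"

		"github.com/docker/swarmkit/api"
	)

	func applyChange(change *api.AssignmentChange) {
		remove := change.Action == api.AssignmentChange_AssignmentActionRemove

		// Exactly one oneof arm is set; the generated Get accessors return
		// nil for the others.
		switch {
		case change.Assignment.GetTask() != nil:
			log.Printf("task %s (remove=%v)", change.Assignment.GetTask().ID, remove)
		case change.Assignment.GetSecret() != nil:
			log.Printf("secret %s (remove=%v)", change.Assignment.GetSecret().ID, remove)
		case change.Assignment.GetConfig() != nil:
			log.Printf("config %s (remove=%v)", change.Assignment.GetConfig().ID, remove)
		}
	}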
message AssignmentsMessage {
	// AssignmentType specifies whether this assignment message carries
	// the full state, or is an update to an existing state.
	enum Type {
		COMPLETE = 0;
		INCREMENTAL = 1;
	}

	Type type = 1;

	// AppliesTo references the previous ResultsIn value, to chain
	// incremental updates together. For the first update in a stream,
	// AppliesTo is empty. If AppliesTo does not match the previously
	// received ResultsIn, the consumer of the stream should start a new
	// Assignments stream to re-sync.
	string applies_to = 2;

	// ResultsIn identifies the result of this assignments message, to
	// match against the next message's AppliesTo value and protect
	// against missed messages.
	string results_in = 3;

	// Changes is the set of changes to apply on this node.
	repeated AssignmentChange changes = 4;
}
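A minimal sketch of the applies_to/results_in chaining described above: the agent tracks the last results_in it applied and bails out (so its caller can open a fresh stream) when a message does not chain. The applyChanges reconciliation hook is hypothetical.

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/swarmkit/api"
	)

	// applyChanges is a hypothetical hook; a real agent would reconcile its
	// local task/secret/config state here.
	func applyChanges(changes []*api.AssignmentChange) {}

	func consumeAssignments(ctx context.Context, client api.DispatcherClient, sessionID string) error {
		stream, err := client.Assignments(ctx, &api.AssignmentsRequest{SessionID: sessionID})
		if err != nil {
			return err
		}

		var lastResultsIn string
		for {
			msg, err := stream.Recv()
			if err != nil {
				return err
			}
			if msg.Type == api.AssignmentsMessage_INCREMENTAL && msg.AppliesTo != lastResultsIn {
				// A message was missed; per the comment above, re-sync by
				// starting a new Assignments stream (handled by the caller).
				return fmt.Errorf("assignment chain broken: got %q, want %q", msg.AppliesTo, lastResultsIn)
			}
			applyChanges(msg.Changes)
			lastResultsIn = msg.ResultsIn
		}
	}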
@@ -0,0 +1,34 @@
syntax = "proto3";
|
||||
|
||||
// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto
|
||||
//
|
||||
// We use the same health check service proto description defined in the gRPC documentation,
|
||||
// including the authorization check. This requires our own implementation of the health
|
||||
// package located in `manager/health`.
|
||||
//
|
||||
// For more infos, refer to:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
|
||||
|
||||
package docker.swarmkit.v1;
|
||||
|
||||
import "gogoproto/gogo.proto";
|
||||
import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
|
||||
|
||||
service Health {
|
||||
rpc Check(HealthCheckRequest) returns (HealthCheckResponse) {
|
||||
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
|
||||
};
|
||||
}
|
||||
|
||||
message HealthCheckRequest {
|
||||
string service = 1;
|
||||
}
|
||||
|
||||
message HealthCheckResponse {
|
||||
enum ServingStatus {
|
||||
UNKNOWN = 0;
|
||||
SERVING = 1;
|
||||
NOT_SERVING = 2;
|
||||
}
|
||||
ServingStatus status = 1;
|
||||
}
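A minimal sketch of querying this Health service over a manager-authorized connection, assuming the generated bindings; as in the standard gRPC health protocol this mirrors, an empty service name asks about overall server health.

	package main

	import (
		"context"
		"log"

		"google.golang.org/grpc"

		"github.com/docker/swarmkit/api"
	)

	func checkHealth(ctx context.Context, conn *grpc.ClientConn) error {
		client := api.NewHealthClient(conn)
		resp, err := client.Check(ctx, &api.HealthCheckRequest{Service: ""})
		if err != nil {
			return err // transport or authorization failure, not a health verdict
		}
		if resp.Status != api.HealthCheckResponse_SERVING {
			log.Printf("manager not serving: %s", resp.Status)
		}
		return nil
	}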