Merge pull request #198 from jonjohnsonjr/basic-challenge
dep update go-containerregistry
commit a641ae0b10
@@ -380,20 +380,20 @@
   branch = "master"
   name = "github.com/google/go-containerregistry"
   packages = [
-    "authn",
-    "name",
-    "v1",
-    "v1/empty",
-    "v1/mutate",
-    "v1/partial",
-    "v1/random",
-    "v1/remote",
-    "v1/remote/transport",
-    "v1/tarball",
-    "v1/types",
-    "v1/v1util"
+    "pkg/authn",
+    "pkg/name",
+    "pkg/v1",
+    "pkg/v1/empty",
+    "pkg/v1/mutate",
+    "pkg/v1/partial",
+    "pkg/v1/random",
+    "pkg/v1/remote",
+    "pkg/v1/remote/transport",
+    "pkg/v1/tarball",
+    "pkg/v1/types",
+    "pkg/v1/v1util"
   ]
-  revision = "ee5a6c257df843b47a2666ff0fff3d31d484ebda"
+  revision = "5e2bd1f4bf61add62944828d54e239d352daaabf"
 
 [[projects]]
   name = "github.com/googleapis/gax-go"
@@ -881,15 +881,9 @@
   revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0"
   source = "google.golang.org/grpc"
 
-[[projects]]
-  name = "gopkg.in/yaml.v2"
-  packages = ["."]
-  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
-  version = "v2.2.1"
-
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "7dc0b12e9e50b1e09cfb6bf10cbeca2299a3c12ef5dd1074666215f683969b10"
+  inputs-digest = "3bffae5dc15e19a20ce63474c3472eab5ecac39bc4d56d3c1e17822f3cf328e4"
   solver-name = "gps-cdcl"
   solver-version = 1
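Both Gopkg.lock hunks are the mechanical fallout of the dep update named in the commit message: the go-containerregistry packages move under pkg/, the pinned revision advances, and gopkg.in/yaml.v2 drops out of the lock file (its vendored files are deleted at the end of this diff). A change like this is typically produced with something like `dep ensure -update github.com/google/go-containerregistry`; the exact invocation is an assumption, since it is not recorded in the commit.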
@@ -17,11 +17,12 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"path/filepath"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
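The long run of hunks that follows repeats this pattern across the kaniko commands, dockerfile, and util packages: the go-containerregistry import gains a pkg/ prefix, and goimports-style grouping moves stdlib imports into a leading block. A minimal self-contained sketch of code against the moved package (hypothetical demo file, not part of the commit):

package main

import (
	"fmt"

	// Before this commit: "github.com/google/go-containerregistry/v1"
	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// v1.Hash is one of the types the kaniko files below use via this package.
	var h v1.Hash
	fmt.Println(h.String())
}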
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
+	"strings"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
-	"strings"
 )
 
 type ArgCommand struct {
@@ -17,11 +17,12 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -16,10 +16,11 @@ limitations under the License.
 package commands
 
 import (
+	"testing"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
-	"testing"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var cmdTests = []struct {
@@ -19,7 +19,7 @@ package commands
 import (
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -17,15 +17,16 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"os"
 	"path/filepath"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/constants"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,11 +17,12 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -20,7 +20,7 @@ import (
 
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var entrypointTests = []struct {
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -16,11 +16,12 @@ limitations under the License.
 package commands
 
 import (
+	"testing"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
-	"testing"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 func Test_EnvExecute(t *testing.T) {
@@ -18,12 +18,13 @@ package commands
 
 import (
 	"fmt"
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"testing"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 func TestUpdateExposedPorts(t *testing.T) {
@@ -21,7 +21,7 @@ import (
 
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,11 +17,12 @@ limitations under the License.
 package commands
 
 import (
+	"testing"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
-	"testing"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 func TestUpdateLabels(t *testing.T) {
@@ -19,7 +19,7 @@ package commands
 import (
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"testing"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var onbuildTests = []struct {
@@ -17,16 +17,17 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
-	"github.com/GoogleContainerTools/kaniko/pkg/util"
-	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/sirupsen/logrus"
 	"os"
 	"os/exec"
 	"strconv"
 	"strings"
 	"syscall"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/sirupsen/logrus"
 )
 
 type RunCommand struct {
@@ -17,11 +17,12 @@ limitations under the License.
 package commands
 
 import (
+	"strings"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
-	"strings"
 )
 
 type ShellCommand struct {
@@ -20,7 +20,7 @@ import (
 
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var shellTests = []struct {
@@ -17,13 +17,14 @@ limitations under the License.
 package commands
 
 import (
+	"strings"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/pkg/signal"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
-	"strings"
 )
 
 type StopSignalCommand struct {
@@ -16,11 +16,12 @@ limitations under the License.
 package commands
 
 import (
+	"testing"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
-	"testing"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var stopsignalTests = []struct {
@@ -17,12 +17,13 @@ limitations under the License.
 package commands
 
 import (
+	"strings"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
-	"strings"
 )
 
 type UserCommand struct {
@@ -16,12 +16,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"testing"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 var userTests = []struct {
@@ -17,13 +17,14 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"os"
 	"strings"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -16,12 +16,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"testing"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 func TestUpdateVolume(t *testing.T) {
@@ -17,13 +17,14 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"os"
 	"path/filepath"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/sirupsen/logrus"
 )
 
@@ -16,12 +16,13 @@ limitations under the License.
 package commands
 
 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"testing"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 // Each test here changes the same WorkingDir field in the config
@@ -18,19 +18,20 @@ package dockerfile
 
 import (
 	"bytes"
-	"github.com/GoogleContainerTools/kaniko/pkg/constants"
-	"github.com/GoogleContainerTools/kaniko/pkg/util"
-	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/docker/docker/builder/dockerfile/parser"
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/empty"
-	"github.com/google/go-containerregistry/v1/remote"
 	"net/http"
 	"path/filepath"
 	"strconv"
 	"strings"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/constants"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
 )
 
 // Parse parses the contents of a Dockerfile and returns a list of commands
@@ -27,15 +27,13 @@ import (
 
 	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
 
-	"github.com/google/go-containerregistry/v1/empty"
-
-	"github.com/google/go-containerregistry/v1/tarball"
-
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/mutate"
-	"github.com/google/go-containerregistry/v1/remote"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
 
 	"io/ioutil"
 
@@ -186,15 +184,12 @@ func DoPush(ref name.Reference, image v1.Image, destinations []string, tarPath s
 		return tarball.WriteToFile(tarPath, destRef, image, nil)
 	}
 
-	wo := remote.WriteOptions{}
-	if ref != nil {
-		wo.MountPaths = []name.Repository{ref.Context()}
-	}
 	pushAuth, err := authn.DefaultKeychain.Resolve(destRef.Context().Registry)
 	if err != nil {
 		return err
 	}
 
+	wo := remote.WriteOptions{}
 	err = remote.Write(destRef, image, pushAuth, http.DefaultTransport, wo)
 	if err != nil {
 		logrus.Error(fmt.Errorf("Failed to push to destination %s", destination))
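Note how this pairs with the vendored go-containerregistry changes later in the diff: callers no longer seed WriteOptions.MountPaths from the base image's repository. Instead, remote.Image wraps every pulled layer in a MountableLayer that remembers its source repository, and remote.Write derives the mount candidates per layer, so DoPush can pass an empty WriteOptions{} and cross-repo blob mounting still happens for layers that came from a registry.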
@@ -20,7 +20,7 @@ import (
 	"os"
 	"strings"
 
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 
 	"github.com/sirupsen/logrus"
 )
@@ -17,18 +17,19 @@ limitations under the License.
 package util
 
 import (
-	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/docker/docker/builder/dockerfile/parser"
-	"github.com/docker/docker/builder/dockerfile/shell"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"net/http"
 	"net/url"
 	"os"
 	"os/user"
 	"path/filepath"
 	"strings"
+
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/dockerfile/shell"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // ResolveEnvironmentReplacementList resolves a list of values by calling resolveEnvironmentReplacement
@@ -26,7 +26,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"github.com/sirupsen/logrus"
@@ -21,7 +21,7 @@ import (
 	"os/exec"
 	"strings"
 
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 // magicNotFoundMessage is the string that the CLI special cases to mean
@@ -72,17 +72,19 @@ func (h *helper) Authorization() (string, error) {
 
 	var out bytes.Buffer
 	cmd.Stdout = &out
-	if err := h.r.Run(cmd); err != nil {
-		return "", err
-	}
-	output := out.String()
+	err := h.r.Run(cmd)
 
 	// If we see this specific message, it means the domain wasn't found
 	// and we should fall back on anonymous auth.
+	output := out.String()
 	if output == magicNotFoundMessage {
 		return Anonymous.Authorization()
 	}
 
+	if err != nil {
+		return "", err
+	}
+
 	// Any other output should be parsed as JSON and the Username / Secret
 	// fields used for Basic authentication.
 	ho := helperOutput{}
@@ -24,7 +24,7 @@ import (
 	"path"
 	"runtime"
 
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 // Keychain is an interface for resolving an image reference to a credential.
@@ -15,7 +15,7 @@
 package empty
 
 import (
-	"github.com/google/go-containerregistry/v1/random"
+	"github.com/google/go-containerregistry/pkg/v1/random"
 )
 
 // Image is a singleton empty image, think: FROM scratch.
@@ -15,7 +15,7 @@
 package v1
 
 import (
-	"github.com/google/go-containerregistry/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/types"
 )
 
 // Image defines the interface for interacting with an OCI v1 image.
@@ -18,7 +18,7 @@ import (
 	"encoding/json"
 	"io"
 
-	"github.com/google/go-containerregistry/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/types"
 )
 
 // Manifest represents the OCI image manifest in a structured way.
@@ -16,6 +16,7 @@ package mutate
 
 import (
 	"archive/tar"
+	"bytes"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -23,9 +24,9 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/partial"
-	"github.com/google/go-containerregistry/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
 )
 
 const whiteoutPrefix = ".wh."
@@ -110,10 +111,17 @@ func Append(base v1.Image, adds ...Addendum) (v1.Image, error) {
 	image.configFile.RootFS.DiffIDs = diffIDs
 	image.configFile.History = history
 	image.manifest.Layers = manifestLayers
-	image.manifest.Config.Digest, err = image.ConfigName()
+
+	rcfg, err := image.RawConfigFile()
 	if err != nil {
 		return nil, err
 	}
+	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
+	if err != nil {
+		return nil, err
+	}
+	image.manifest.Config.Digest = d
+	image.manifest.Config.Size = sz
 
 	return image, nil
 }
@@ -136,9 +144,44 @@ func Config(base v1.Image, cfg v1.Config) (v1.Image, error) {
 		Image:      base,
 		manifest:   m.DeepCopy(),
 		configFile: cf.DeepCopy(),
 		diffIDMap:  make(map[v1.Hash]v1.Layer),
 		digestMap:  make(map[v1.Hash]v1.Layer),
 	}
+
+	rcfg, err := image.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
+	if err != nil {
+		return nil, err
+	}
+	image.manifest.Config.Digest = d
+	image.manifest.Config.Size = sz
+	return image, nil
+}
+
+// Created mutates the provided v1.Image to have the provided v1.Time
+func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) {
+	m, err := base.Manifest()
+	if err != nil {
+		return nil, err
+	}
+
+	cf, err := base.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := cf.DeepCopy()
+	cfg.Created = created
+
+	image := &image{
+		Image:      base,
+		manifest:   m.DeepCopy(),
+		configFile: cfg,
+		digestMap:  make(map[v1.Hash]v1.Layer),
+	}
 
 	image.manifest.Config.Digest, err = image.ConfigName()
 	if err != nil {
 		return nil, err
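The recurring block above replaces image.ConfigName() with an explicit hash of the serialized config, so the manifest records the config's size as well as its digest. The same computation in a standalone sketch (hypothetical helper, mirroring only the digest-plus-size relationship visible in the hunks):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// configDescriptor hashes a raw config blob the way the hunks above do:
// SHA-256 over the bytes for the digest, plus the byte length for the size.
func configDescriptor(rawConfig []byte) (string, int64) {
	h := sha256.Sum256(rawConfig)
	return "sha256:" + hex.EncodeToString(h[:]), int64(len(rawConfig))
}

func main() {
	d, sz := configDescriptor([]byte(`{"architecture":"amd64"}`))
	fmt.Println(d, sz)
}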
@@ -17,8 +17,8 @@ package mutate
 import (
 	"fmt"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/empty"
 )
 
 type RebaseOptions struct {
@@ -17,8 +17,8 @@ package partial
 import (
 	"io"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // CompressedLayer represents the bare minimum interface a natively
@@ -50,6 +50,11 @@ func (ule *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
 
 // DiffID implements v1.Layer
 func (ule *compressedLayerExtender) DiffID() (v1.Hash, error) {
+	// If our nested CompressedLayer implements DiffID,
+	// then delegate to it instead.
+	if wdi, ok := ule.CompressedLayer.(WithDiffID); ok {
+		return wdi.DiffID()
+	}
 	r, err := ule.Uncompressed()
 	if err != nil {
 		return v1.Hash{}, err
@@ -15,7 +15,7 @@
 package partial
 
 import (
-	"github.com/google/go-containerregistry/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/types"
 )
 
 // imageCore is the core set of properties without which we cannot build a v1.Image
@@ -19,9 +19,9 @@ import (
 	"io"
 	"sync"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/types"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // UncompressedLayer represents the bare minimum interface a natively
@@ -20,8 +20,8 @@ import (
 	"fmt"
 	"io"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // WithRawConfigFile defines the subset of v1.Image used by these helper methods
@@ -285,3 +285,8 @@ func UncompressedBlob(b WithBlob, h v1.Hash) (io.ReadCloser, error) {
 	}
 	return v1util.GunzipReadCloser(rc)
 }
+
+// WithDiffID defines the subset of v1.Layer for exposing the DiffID method.
+type WithDiffID interface {
+	DiffID() (v1.Hash, error)
+}
@@ -22,10 +22,10 @@ import (
 	"io"
 	"time"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/partial"
-	"github.com/google/go-containerregistry/v1/types"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // uncompressedLayer implements partial.UncompressedLayer from raw bytes.
@@ -20,9 +20,9 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
 )
 
 // DeleteOptions are used to expose optional information to guide or
@@ -23,13 +23,13 @@ import (
 	"net/url"
 	"sync"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/partial"
-	"github.com/google/go-containerregistry/v1/remote/transport"
-	"github.com/google/go-containerregistry/v1/types"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // remoteImage accesses an image from a remote registry
@@ -51,10 +51,19 @@ func Image(ref name.Reference, auth authn.Authenticator, t http.RoundTripper) (v
 	if err != nil {
 		return nil, err
 	}
-	return partial.CompressedToImage(&remoteImage{
+	img, err := partial.CompressedToImage(&remoteImage{
 		ref:    ref,
 		client: &http.Client{Transport: tr},
 	})
+	if err != nil {
+		return nil, err
+	}
+	// Wrap the v1.Layers returned by this v1.Image in a hint for downstream
+	// remote.Write calls to facilitate cross-repo "mounting".
+	return &mountableImage{
+		Image:      img,
+		Repository: ref.Context(),
+	}, nil
 }
 
 func (r *remoteImage) url(resource, identifier string) url.URL {
@@ -191,6 +200,17 @@ func (rl *remoteLayer) Size() (int64, error) {
 	return partial.BlobSize(rl, rl.digest)
 }
 
+// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below.
+func (rl *remoteLayer) ConfigFile() (*v1.ConfigFile, error) {
+	return partial.ConfigFile(rl.ri)
+}
+
+// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have
+// available in our ConfigFile.
+func (rl *remoteLayer) DiffID() (v1.Hash, error) {
+	return partial.BlobToDiffID(rl, rl.digest)
+}
+
 // LayerByDigest implements partial.CompressedLayer
 func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
 	return &remoteLayer{
@@ -20,9 +20,9 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
 )
 
 type Tags struct {
vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go (new file, 77 lines, generated, vendored)
@@ -0,0 +1,77 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+)
+
+// MountableLayer wraps a v1.Layer in a shim that enables the layer to be
+// "mounted" when published to another registry.
+type MountableLayer struct {
+	v1.Layer
+
+	Repository name.Repository
+}
+
+// mountableImage wraps the v1.Layer references returned by the embedded v1.Image
+// in MountableLayer's so that remote.Write might attempt to mount them from their
+// source repository.
+type mountableImage struct {
+	v1.Image
+
+	Repository name.Repository
+}
+
+// Layers implements v1.Image
+func (mi *mountableImage) Layers() ([]v1.Layer, error) {
+	ls, err := mi.Image.Layers()
+	if err != nil {
+		return nil, err
+	}
+	mls := make([]v1.Layer, 0, len(ls))
+	for _, l := range ls {
+		mls = append(mls, &MountableLayer{
+			Layer:      l,
+			Repository: mi.Repository,
+		})
+	}
+	return mls, nil
+}
+
+// LayerByDigest implements v1.Image
+func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) {
+	l, err := mi.Image.LayerByDigest(d)
+	if err != nil {
+		return nil, err
+	}
+	return &MountableLayer{
+		Layer:      l,
+		Repository: mi.Repository,
+	}, nil
+}
+
+// LayerByDiffID implements v1.Image
+func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) {
+	l, err := mi.Image.LayerByDiffID(d)
+	if err != nil {
+		return nil, err
+	}
+	return &MountableLayer{
+		Layer:      l,
+		Repository: mi.Repository,
+	}, nil
+}
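A rough usage sketch of the mount flow added here, using the signatures as vendored in this commit (the reference names are hypothetical, error handling is minimal, and this predates go-containerregistry's later options-style API):

package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Pull: remote.Image (changed above) wraps the result so that every
	// layer is a MountableLayer remembering gcr.io/demo/src as its source.
	src, err := name.NewTag("gcr.io/demo/src:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	img, err := remote.Image(src, authn.Anonymous, http.DefaultTransport)
	if err != nil {
		log.Fatal(err)
	}

	// Push: remote.Write (changed below) spots the MountableLayers, requests
	// pull scope on their source repository, and asks the registry to mount
	// the blobs instead of re-uploading them. No MountPaths needed.
	dst, err := name.NewTag("gcr.io/demo/dst:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	if err := remote.Write(dst, img, authn.Anonymous, http.DefaultTransport, remote.WriteOptions{}); err != nil {
		log.Fatal(err)
	}
}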
@@ -17,7 +17,7 @@ package transport
 import (
 	"net/http"
 
-	"github.com/google/go-containerregistry/authn"
+	"github.com/google/go-containerregistry/pkg/authn"
 )
 
 type basicTransport struct {
@@ -22,8 +22,8 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 type bearerTransport struct {
@@ -19,15 +19,15 @@ import (
 	"net/http"
 	"strings"
 
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 type challenge string
 
 const (
-	anonymous challenge = "Anonymous"
-	basic     challenge = "Basic"
-	bearer    challenge = "Bearer"
+	anonymous challenge = "anonymous"
+	basic     challenge = "basic"
+	bearer    challenge = "bearer"
 )
 
 type pingResp struct {
@@ -38,6 +38,10 @@ type pingResp struct {
 	parameters map[string]string
 }
 
+func (c challenge) Canonical() challenge {
+	return challenge(strings.ToLower(string(c)))
+}
+
 func parseChallenge(suffix string) map[string]string {
 	kv := make(map[string]string)
 	for _, token := range strings.Split(suffix, ",") {
@@ -75,13 +79,13 @@ func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
 		if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
 			// If there are two parts, then parse the challenge parameters.
 			return &pingResp{
-				challenge:  challenge(strings.Title(parts[0])),
+				challenge:  challenge(parts[0]).Canonical(),
 				parameters: parseChallenge(parts[1]),
 			}, nil
 		}
 		// Otherwise, just return the challenge without parameters.
 		return &pingResp{
-			challenge: challenge(strings.Title(wac)),
+			challenge: challenge(wac).Canonical(),
 		}, nil
 	default:
 		return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status)
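This is the fix the branch name basic-challenge refers to. The auth-scheme token in a WWW-Authenticate header is case-insensitive (RFC 7235), but strings.Title only adjusts the first letter, so a registry answering with an all-caps or mixed-case scheme never matched the Basic/Bearer constants. Lowercasing both the constants and the parsed scheme makes the comparison total. A runnable illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Title("BASIC") is still "BASIC", so the old comparison against
	// "Basic" failed; strings.ToLower collapses every spelling to "basic".
	for _, scheme := range []string{"basic", "Basic", "BASIC"} {
		fmt.Printf("%-6s title=%-6s lower=%s\n",
			scheme, strings.Title(scheme), strings.ToLower(scheme))
	}
}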
@@ -18,7 +18,7 @@ import (
 	"regexp"
 	"strings"
 
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 // Detect more complex forms of local references.
@@ -18,8 +18,8 @@ import (
 	"fmt"
 	"net/http"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
 )
 
 const (
@@ -49,7 +49,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope
 		return nil, err
 	}
 
-	switch pr.challenge {
+	switch pr.challenge.Canonical() {
 	case anonymous:
 		return t, nil
 	case basic:
@@ -22,17 +22,15 @@ import (
 	"net/http"
 	"net/url"
 
-	"github.com/google/go-containerregistry/authn"
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
 )
 
 // WriteOptions are used to expose optional information to guide or
 // control the image write.
 type WriteOptions struct {
-	// The set of paths from which to attempt to mount blobs.
-	MountPaths []name.Repository
 	// TODO(mattmoor): Expose "threads" to limit parallelism?
 }
 
@@ -40,9 +38,15 @@
 func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper,
 	wo WriteOptions) error {
 
+	ls, err := img.Layers()
+	if err != nil {
+		return err
+	}
 	scopes := []string{ref.Scope(transport.PushScope)}
-	for _, mp := range wo.MountPaths {
-		scopes = append(scopes, mp.Scope(transport.PullScope))
+	for _, l := range ls {
+		if ml, ok := l.(*MountableLayer); ok {
+			scopes = append(scopes, ml.Repository.Scope(transport.PullScope))
+		}
 	}
 
 	tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
@@ -132,16 +136,16 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e
 	uv := url.Values{
 		"mount": []string{h.String()},
 	}
-	var from []string
-	for _, m := range w.options.MountPaths {
-		from = append(from, m.RepositoryStr())
+	l, err := w.img.LayerByDigest(h)
+	if err != nil {
+		return "", false, err
 	}
 	// We currently avoid HEAD because it's semi-redundant with the mount that is part
 	// of initiating the blob upload. GCR will perform an existence check on the initiation
 	// if "mount" is specified, even if no "from" sources are specified. If this turns out
 	// to not be broadly applicable then we should replace mounts without "from"s with a HEAD.
-	if len(from) > 0 {
-		uv["from"] = from
+	if ml, ok := l.(*MountableLayer); ok {
+		uv["from"] = []string{ml.Repository.RepositoryStr()}
 	}
 	u.RawQuery = uv.Encode()
 
@@ -286,7 +290,7 @@ func (w *writer) commitImage() error {
 	}
 
 	// The image was successfully pushed!
-	fmt.Printf("%v: digest: %v size: %d\n", w.ref, digest, len(raw))
+	log.Printf("%v: digest: %v size: %d", w.ref, digest, len(raw))
 	return nil
 }
 
@@ -25,11 +25,11 @@ import (
 	"os"
 	"sync"
 
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/partial"
-	"github.com/google/go-containerregistry/v1/types"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 type image struct {
@@ -298,7 +298,7 @@ type compressedLayerFromTarball struct {
 	filePath string
 }
 
-// DiffID implements partial.CompressedLayer
+// Digest implements partial.CompressedLayer
 func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) {
 	return clft.digest, nil
 }
@@ -20,8 +20,8 @@ import (
 	"io/ioutil"
 	"os"
 
-	"github.com/google/go-containerregistry/v1"
-	"github.com/google/go-containerregistry/v1/v1util"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 type layer struct {
@@ -22,8 +22,8 @@ import (
 	"io"
 	"os"
 
-	"github.com/google/go-containerregistry/name"
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 // WriteOptions are used to expose optional information to guide or
@@ -20,7 +20,7 @@ import (
 	"hash"
 	"io"
 
-	"github.com/google/go-containerregistry/v1"
+	"github.com/google/go-containerregistry/pkg/v1"
 )
 
 type verifyReader struct {
@@ -24,14 +24,23 @@ var gzipMagicHeader = []byte{'\x1f', '\x8b'}
 
 // GzipReadCloser reads uncompressed input data from the io.ReadCloser and
 // returns an io.ReadCloser from which compressed data may be read.
+// This uses gzip.BestSpeed for the compression level.
 func GzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+	return GzipReadCloserLevel(r, gzip.BestSpeed)
+}
+
+// GzipReadCloserLevel reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// Refer to compress/gzip for the level:
+// https://golang.org/pkg/compress/gzip/#pkg-constants
+func GzipReadCloserLevel(r io.ReadCloser, level int) (io.ReadCloser, error) {
 	pr, pw := io.Pipe()
 
 	go func() {
 		defer pw.Close()
 		defer r.Close()
 
-		gw, _ := gzip.NewWriterLevel(pw, gzip.BestCompression)
+		gw, _ := gzip.NewWriterLevel(pw, level)
 		defer gw.Close()
 
 		_, err := io.Copy(gw, r)
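Behavior note: the default for GzipReadCloser changes here from gzip.BestCompression to gzip.BestSpeed, and the new GzipReadCloserLevel lets callers choose. A usage sketch against this vendored API (the input string is arbitrary):

package main

import (
	"compress/gzip"
	"io"
	"io/ioutil"
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/v1/v1util"
)

func main() {
	// Ask for the old behavior explicitly via the new level parameter.
	in := ioutil.NopCloser(strings.NewReader("some layer bytes"))
	rc, err := v1util.GzipReadCloserLevel(in, gzip.BestCompression)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	n, err := io.Copy(ioutil.Discard, rc)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed stream was %d bytes", n)
}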
@@ -1,201 +0,0 @@
(deleted file, 201 lines: the full Apache License 2.0 text; given the surrounding deletions, evidently the vendored gopkg.in/yaml.v2 LICENSE, removed along with the rest of the now-unused yaml.v2 vendor directory)
@@ -1,31 +0,0 @@
(deleted file, 31 lines: the libyaml copyright notice from Kirill Simonov and MIT license covering the Go files ported from C, apic.go, emitterc.go, parserc.go, readerc.go, scannerc.go, writerc.go, yamlh.go, yamlprivateh.go; evidently the vendored gopkg.in/yaml.v2 LICENSE.libyaml)
@ -1,13 +0,0 @@
Copyright 2011-2016 Canonical Ltd.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,739 +0,0 @@
package yaml

import (
    "io"
)

func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
    //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

    // Check if we can move the queue at the beginning of the buffer.
    if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
        if parser.tokens_head != len(parser.tokens) {
            copy(parser.tokens, parser.tokens[parser.tokens_head:])
        }
        parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
        parser.tokens_head = 0
    }
    parser.tokens = append(parser.tokens, *token)
    if pos < 0 {
        return
    }
    copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
    parser.tokens[parser.tokens_head+pos] = *token
}

// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
    *parser = yaml_parser_t{
        raw_buffer: make([]byte, 0, input_raw_buffer_size),
        buffer: make([]byte, 0, input_buffer_size),
    }
    return true
}

// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
    *parser = yaml_parser_t{}
}

// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
    if parser.input_pos == len(parser.input) {
        return 0, io.EOF
    }
    n = copy(buffer, parser.input[parser.input_pos:])
    parser.input_pos += n
    return n, nil
}

// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
    return parser.input_reader.Read(buffer)
}

// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
    if parser.read_handler != nil {
        panic("must set the input source only once")
    }
    parser.read_handler = yaml_string_read_handler
    parser.input = input
    parser.input_pos = 0
}

// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
    if parser.read_handler != nil {
        panic("must set the input source only once")
    }
    parser.read_handler = yaml_reader_read_handler
    parser.input_reader = r
}

// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
    if parser.encoding != yaml_ANY_ENCODING {
        panic("must set the encoding only once")
    }
    parser.encoding = encoding
}

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
    *emitter = yaml_emitter_t{
        buffer: make([]byte, output_buffer_size),
        raw_buffer: make([]byte, 0, output_raw_buffer_size),
        states: make([]yaml_emitter_state_t, 0, initial_stack_size),
        events: make([]yaml_event_t, 0, initial_queue_size),
    }
}

// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
    *emitter = yaml_emitter_t{}
}

// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
    *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
    return nil
}

// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
    _, err := emitter.output_writer.Write(buffer)
    return err
}

// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
    if emitter.write_handler != nil {
        panic("must set the output target only once")
    }
    emitter.write_handler = yaml_string_write_handler
    emitter.output_buffer = output_buffer
}

// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
    if emitter.write_handler != nil {
        panic("must set the output target only once")
    }
    emitter.write_handler = yaml_writer_write_handler
    emitter.output_writer = w
}

// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
    if emitter.encoding != yaml_ANY_ENCODING {
        panic("must set the output encoding only once")
    }
    emitter.encoding = encoding
}

// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
    emitter.canonical = canonical
}

//// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
    if indent < 2 || indent > 9 {
        indent = 2
    }
    emitter.best_indent = indent
}

// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
    if width < 0 {
        width = -1
    }
    emitter.best_width = width
}

// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
    emitter.unicode = unicode
}

// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
    emitter.line_break = line_break
}

///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
//    assert(token); // Non-NULL token object expected.
//
//    switch (token.type)
//    {
//    case YAML_TAG_DIRECTIVE_TOKEN:
//        yaml_free(token.data.tag_directive.handle);
//        yaml_free(token.data.tag_directive.prefix);
//        break;
//
//    case YAML_ALIAS_TOKEN:
//        yaml_free(token.data.alias.value);
//        break;
//
//    case YAML_ANCHOR_TOKEN:
//        yaml_free(token.data.anchor.value);
//        break;
//
//    case YAML_TAG_TOKEN:
//        yaml_free(token.data.tag.handle);
//        yaml_free(token.data.tag.suffix);
//        break;
//
//    case YAML_SCALAR_TOKEN:
//        yaml_free(token.data.scalar.value);
//        break;
//
//    default:
//        break;
//    }
//
//    memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
//    yaml_char_t *end = start+length;
//    yaml_char_t *pointer = start;
//
//    while (pointer < end) {
//        unsigned char octet;
//        unsigned int width;
//        unsigned int value;
//        size_t k;
//
//        octet = pointer[0];
//        width = (octet & 0x80) == 0x00 ? 1 :
//            (octet & 0xE0) == 0xC0 ? 2 :
//            (octet & 0xF0) == 0xE0 ? 3 :
//            (octet & 0xF8) == 0xF0 ? 4 : 0;
//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
//            (octet & 0xE0) == 0xC0 ? octet & 0x1F :
//            (octet & 0xF0) == 0xE0 ? octet & 0x0F :
//            (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
//        if (!width) return 0;
//        if (pointer+width > end) return 0;
//        for (k = 1; k < width; k ++) {
//            octet = pointer[k];
//            if ((octet & 0xC0) != 0x80) return 0;
//            value = (value << 6) + (octet & 0x3F);
//        }
//        if (!((width == 1) ||
//            (width == 2 && value >= 0x80) ||
//            (width == 3 && value >= 0x800) ||
//            (width == 4 && value >= 0x10000))) return 0;
//
//        pointer += width;
//    }
//
//    return 1;
//}
//

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
    *event = yaml_event_t{
        typ: yaml_STREAM_START_EVENT,
        encoding: encoding,
    }
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
    *event = yaml_event_t{
        typ: yaml_STREAM_END_EVENT,
    }
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
    event *yaml_event_t,
    version_directive *yaml_version_directive_t,
    tag_directives []yaml_tag_directive_t,
    implicit bool,
) {
    *event = yaml_event_t{
        typ: yaml_DOCUMENT_START_EVENT,
        version_directive: version_directive,
        tag_directives: tag_directives,
        implicit: implicit,
    }
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
    *event = yaml_event_t{
        typ: yaml_DOCUMENT_END_EVENT,
        implicit: implicit,
    }
}

///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
//    mark yaml_mark_t = { 0, 0, 0 }
//    anchor_copy *yaml_char_t = NULL
//
//    assert(event) // Non-NULL event object is expected.
//    assert(anchor) // Non-NULL anchor is expected.
//
//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
//    anchor_copy = yaml_strdup(anchor)
//    if (!anchor_copy)
//        return 0
//
//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
//    return 1
//}

// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
    *event = yaml_event_t{
        typ: yaml_SCALAR_EVENT,
        anchor: anchor,
        tag: tag,
        value: value,
        implicit: plain_implicit,
        quoted_implicit: quoted_implicit,
        style: yaml_style_t(style),
    }
    return true
}

// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
    *event = yaml_event_t{
        typ: yaml_SEQUENCE_START_EVENT,
        anchor: anchor,
        tag: tag,
        implicit: implicit,
        style: yaml_style_t(style),
    }
    return true
}

// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
    *event = yaml_event_t{
        typ: yaml_SEQUENCE_END_EVENT,
    }
    return true
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
    *event = yaml_event_t{
        typ: yaml_MAPPING_START_EVENT,
        anchor: anchor,
        tag: tag,
        implicit: implicit,
        style: yaml_style_t(style),
    }
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
    *event = yaml_event_t{
        typ: yaml_MAPPING_END_EVENT,
    }
}

// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
    *event = yaml_event_t{}
}

///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
//    version_directive *yaml_version_directive_t,
//    tag_directives_start *yaml_tag_directive_t,
//    tag_directives_end *yaml_tag_directive_t,
//    start_implicit int, end_implicit int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    struct {
//        start *yaml_node_t
//        end *yaml_node_t
//        top *yaml_node_t
//    } nodes = { NULL, NULL, NULL }
//    version_directive_copy *yaml_version_directive_t = NULL
//    struct {
//        start *yaml_tag_directive_t
//        end *yaml_tag_directive_t
//        top *yaml_tag_directive_t
//    } tag_directives_copy = { NULL, NULL, NULL }
//    value yaml_tag_directive_t = { NULL, NULL }
//    mark yaml_mark_t = { 0, 0, 0 }
//
//    assert(document) // Non-NULL document object is expected.
//    assert((tag_directives_start && tag_directives_end) ||
//        (tag_directives_start == tag_directives_end))
//        // Valid tag directives are expected.
//
//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
//    if (version_directive) {
//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
//        if (!version_directive_copy) goto error
//        version_directive_copy.major = version_directive.major
//        version_directive_copy.minor = version_directive.minor
//    }
//
//    if (tag_directives_start != tag_directives_end) {
//        tag_directive *yaml_tag_directive_t
//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
//            goto error
//        for (tag_directive = tag_directives_start
//            tag_directive != tag_directives_end; tag_directive ++) {
//            assert(tag_directive.handle)
//            assert(tag_directive.prefix)
//            if (!yaml_check_utf8(tag_directive.handle,
//                strlen((char *)tag_directive.handle)))
//                goto error
//            if (!yaml_check_utf8(tag_directive.prefix,
//                strlen((char *)tag_directive.prefix)))
//                goto error
//            value.handle = yaml_strdup(tag_directive.handle)
//            value.prefix = yaml_strdup(tag_directive.prefix)
//            if (!value.handle || !value.prefix) goto error
//            if (!PUSH(&context, tag_directives_copy, value))
//                goto error
//            value.handle = NULL
//            value.prefix = NULL
//        }
//    }
//
//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
//        tag_directives_copy.start, tag_directives_copy.top,
//        start_implicit, end_implicit, mark, mark)
//
//    return 1
//
//error:
//    STACK_DEL(&context, nodes)
//    yaml_free(version_directive_copy)
//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
//        yaml_free(value.handle)
//        yaml_free(value.prefix)
//    }
//    STACK_DEL(&context, tag_directives_copy)
//    yaml_free(value.handle)
//    yaml_free(value.prefix)
//
//    return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    tag_directive *yaml_tag_directive_t
//
//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
//    assert(document) // Non-NULL document object is expected.
//
//    while (!STACK_EMPTY(&context, document.nodes)) {
//        node yaml_node_t = POP(&context, document.nodes)
//        yaml_free(node.tag)
//        switch (node.type) {
//        case YAML_SCALAR_NODE:
//            yaml_free(node.data.scalar.value)
//            break
//        case YAML_SEQUENCE_NODE:
//            STACK_DEL(&context, node.data.sequence.items)
//            break
//        case YAML_MAPPING_NODE:
//            STACK_DEL(&context, node.data.mapping.pairs)
//            break
//        default:
//            assert(0) // Should not happen.
//        }
//    }
//    STACK_DEL(&context, document.nodes)
//
//    yaml_free(document.version_directive)
//    for (tag_directive = document.tag_directives.start
//        tag_directive != document.tag_directives.end
//        tag_directive++) {
//        yaml_free(tag_directive.handle)
//        yaml_free(tag_directive.prefix)
//    }
//    yaml_free(document.tag_directives.start)
//
//    memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
//        return document.nodes.start + index - 1
//    }
//    return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
//    assert(document) // Non-NULL document object is expected.
//
//    if (document.nodes.top != document.nodes.start) {
//        return document.nodes.start
//    }
//    return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
//    tag *yaml_char_t, value *yaml_char_t, length int,
//    style yaml_scalar_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    value_copy *yaml_char_t = NULL
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//    assert(value) // Non-NULL value is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (length < 0) {
//        length = strlen((char *)value)
//    }
//
//    if (!yaml_check_utf8(value, length)) goto error
//    value_copy = yaml_malloc(length+1)
//    if (!value_copy) goto error
//    memcpy(value_copy, value, length)
//    value_copy[length] = '\0'
//
//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    yaml_free(tag_copy)
//    yaml_free(value_copy)
//
//    return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_sequence_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_item_t
//        end *yaml_node_item_t
//        top *yaml_node_item_t
//    } items = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, items)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
//    tag *yaml_char_t, style yaml_mapping_style_t)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//    mark yaml_mark_t = { 0, 0, 0 }
//    tag_copy *yaml_char_t = NULL
//    struct {
//        start *yaml_node_pair_t
//        end *yaml_node_pair_t
//        top *yaml_node_pair_t
//    } pairs = { NULL, NULL, NULL }
//    node yaml_node_t
//
//    assert(document) // Non-NULL document object is expected.
//
//    if (!tag) {
//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
//    }
//
//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
//    tag_copy = yaml_strdup(tag)
//    if (!tag_copy) goto error
//
//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
//        style, mark, mark)
//    if (!PUSH(&context, document.nodes, node)) goto error
//
//    return document.nodes.top - document.nodes.start
//
//error:
//    STACK_DEL(&context, pairs)
//    yaml_free(tag_copy)
//
//    return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
//    sequence int, item int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    assert(document) // Non-NULL document is required.
//    assert(sequence > 0
//        && document.nodes.start + sequence <= document.nodes.top)
//        // Valid sequence id is required.
//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
//        // A sequence node is required.
//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
//        // Valid item id is required.
//
//    if (!PUSH(&context,
//        document.nodes.start[sequence-1].data.sequence.items, item))
//        return 0
//
//    return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
//    mapping int, key int, value int)
//{
//    struct {
//        error yaml_error_type_t
//    } context
//
//    pair yaml_node_pair_t
//
//    assert(document) // Non-NULL document is required.
//    assert(mapping > 0
//        && document.nodes.start + mapping <= document.nodes.top)
//        // Valid mapping id is required.
//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
//        // A mapping node is required.
//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
//        // Valid key id is required.
//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
//        // Valid value id is required.
//
//    pair.key = key
//    pair.value = value
//
//    if (!PUSH(&context,
//        document.nodes.start[mapping-1].data.mapping.pairs, pair))
//        return 0
//
//    return 1
//}
//
//
@ -1,775 +0,0 @@
package yaml

import (
    "encoding"
    "encoding/base64"
    "fmt"
    "io"
    "math"
    "reflect"
    "strconv"
    "time"
)

const (
    documentNode = 1 << iota
    mappingNode
    sequenceNode
    scalarNode
    aliasNode
)

type node struct {
    kind int
    line, column int
    tag string
    // For an alias node, alias holds the resolved alias.
    alias *node
    value string
    implicit bool
    children []*node
    anchors map[string]*node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

type parser struct {
    parser yaml_parser_t
    event yaml_event_t
    doc *node
    doneInit bool
}

func newParser(b []byte) *parser {
    p := parser{}
    if !yaml_parser_initialize(&p.parser) {
        panic("failed to initialize YAML emitter")
    }
    if len(b) == 0 {
        b = []byte{'\n'}
    }
    yaml_parser_set_input_string(&p.parser, b)
    return &p
}

func newParserFromReader(r io.Reader) *parser {
    p := parser{}
    if !yaml_parser_initialize(&p.parser) {
        panic("failed to initialize YAML emitter")
    }
    yaml_parser_set_input_reader(&p.parser, r)
    return &p
}

func (p *parser) init() {
    if p.doneInit {
        return
    }
    p.expect(yaml_STREAM_START_EVENT)
    p.doneInit = true
}

func (p *parser) destroy() {
    if p.event.typ != yaml_NO_EVENT {
        yaml_event_delete(&p.event)
    }
    yaml_parser_delete(&p.parser)
}

// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
    if p.event.typ == yaml_NO_EVENT {
        if !yaml_parser_parse(&p.parser, &p.event) {
            p.fail()
        }
    }
    if p.event.typ == yaml_STREAM_END_EVENT {
        failf("attempted to go past the end of stream; corrupted value?")
    }
    if p.event.typ != e {
        p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
        p.fail()
    }
    yaml_event_delete(&p.event)
    p.event.typ = yaml_NO_EVENT
}

// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
    if p.event.typ != yaml_NO_EVENT {
        return p.event.typ
    }
    if !yaml_parser_parse(&p.parser, &p.event) {
        p.fail()
    }
    return p.event.typ
}

func (p *parser) fail() {
    var where string
    var line int
    if p.parser.problem_mark.line != 0 {
        line = p.parser.problem_mark.line
        // Scanner errors don't iterate line before returning error
        if p.parser.error == yaml_SCANNER_ERROR {
            line++
        }
    } else if p.parser.context_mark.line != 0 {
        line = p.parser.context_mark.line
    }
    if line != 0 {
        where = "line " + strconv.Itoa(line) + ": "
    }
    var msg string
    if len(p.parser.problem) > 0 {
        msg = p.parser.problem
    } else {
        msg = "unknown problem parsing YAML content"
    }
    failf("%s%s", where, msg)
}

func (p *parser) anchor(n *node, anchor []byte) {
    if anchor != nil {
        p.doc.anchors[string(anchor)] = n
    }
}

func (p *parser) parse() *node {
    p.init()
    switch p.peek() {
    case yaml_SCALAR_EVENT:
        return p.scalar()
    case yaml_ALIAS_EVENT:
        return p.alias()
    case yaml_MAPPING_START_EVENT:
        return p.mapping()
    case yaml_SEQUENCE_START_EVENT:
        return p.sequence()
    case yaml_DOCUMENT_START_EVENT:
        return p.document()
    case yaml_STREAM_END_EVENT:
        // Happens when attempting to decode an empty buffer.
        return nil
    default:
        panic("attempted to parse unknown event: " + p.event.typ.String())
    }
}

func (p *parser) node(kind int) *node {
    return &node{
        kind: kind,
        line: p.event.start_mark.line,
        column: p.event.start_mark.column,
    }
}

func (p *parser) document() *node {
    n := p.node(documentNode)
    n.anchors = make(map[string]*node)
    p.doc = n
    p.expect(yaml_DOCUMENT_START_EVENT)
    n.children = append(n.children, p.parse())
    p.expect(yaml_DOCUMENT_END_EVENT)
    return n
}

func (p *parser) alias() *node {
    n := p.node(aliasNode)
    n.value = string(p.event.anchor)
    n.alias = p.doc.anchors[n.value]
    if n.alias == nil {
        failf("unknown anchor '%s' referenced", n.value)
    }
    p.expect(yaml_ALIAS_EVENT)
    return n
}

func (p *parser) scalar() *node {
    n := p.node(scalarNode)
    n.value = string(p.event.value)
    n.tag = string(p.event.tag)
    n.implicit = p.event.implicit
    p.anchor(n, p.event.anchor)
    p.expect(yaml_SCALAR_EVENT)
    return n
}

func (p *parser) sequence() *node {
    n := p.node(sequenceNode)
    p.anchor(n, p.event.anchor)
    p.expect(yaml_SEQUENCE_START_EVENT)
    for p.peek() != yaml_SEQUENCE_END_EVENT {
        n.children = append(n.children, p.parse())
    }
    p.expect(yaml_SEQUENCE_END_EVENT)
    return n
}

func (p *parser) mapping() *node {
    n := p.node(mappingNode)
    p.anchor(n, p.event.anchor)
    p.expect(yaml_MAPPING_START_EVENT)
    for p.peek() != yaml_MAPPING_END_EVENT {
        n.children = append(n.children, p.parse(), p.parse())
    }
    p.expect(yaml_MAPPING_END_EVENT)
    return n
}

// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.

type decoder struct {
    doc *node
    aliases map[*node]bool
    mapType reflect.Type
    terrors []string
    strict bool
}

var (
    mapItemType = reflect.TypeOf(MapItem{})
    durationType = reflect.TypeOf(time.Duration(0))
    defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
    ifaceType = defaultMapType.Elem()
    timeType = reflect.TypeOf(time.Time{})
    ptrTimeType = reflect.TypeOf(&time.Time{})
)

func newDecoder(strict bool) *decoder {
    d := &decoder{mapType: defaultMapType, strict: strict}
    d.aliases = make(map[*node]bool)
    return d
}

func (d *decoder) terror(n *node, tag string, out reflect.Value) {
    if n.tag != "" {
        tag = n.tag
    }
    value := n.value
    if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
        if len(value) > 10 {
            value = " `" + value[:7] + "...`"
        } else {
            value = " `" + value + "`"
        }
    }
    d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}

func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
    terrlen := len(d.terrors)
    err := u.UnmarshalYAML(func(v interface{}) (err error) {
        defer handleErr(&err)
        d.unmarshal(n, reflect.ValueOf(v))
        if len(d.terrors) > terrlen {
            issues := d.terrors[terrlen:]
            d.terrors = d.terrors[:terrlen]
            return &TypeError{issues}
        }
        return nil
    })
    if e, ok := err.(*TypeError); ok {
        d.terrors = append(d.terrors, e.Errors...)
        return false
    }
    if err != nil {
        fail(err)
    }
    return true
}

// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
    if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
        return out, false, false
    }
    again := true
    for again {
        again = false
        if out.Kind() == reflect.Ptr {
            if out.IsNil() {
                out.Set(reflect.New(out.Type().Elem()))
            }
            out = out.Elem()
            again = true
        }
        if out.CanAddr() {
            if u, ok := out.Addr().Interface().(Unmarshaler); ok {
                good = d.callUnmarshaler(n, u)
                return out, true, good
            }
        }
    }
    return out, false, false
}

func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
    switch n.kind {
    case documentNode:
        return d.document(n, out)
    case aliasNode:
        return d.alias(n, out)
    }
    out, unmarshaled, good := d.prepare(n, out)
    if unmarshaled {
        return good
    }
    switch n.kind {
    case scalarNode:
        good = d.scalar(n, out)
    case mappingNode:
        good = d.mapping(n, out)
    case sequenceNode:
        good = d.sequence(n, out)
    default:
        panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
    }
    return good
}

func (d *decoder) document(n *node, out reflect.Value) (good bool) {
    if len(n.children) == 1 {
        d.doc = n
        d.unmarshal(n.children[0], out)
        return true
    }
    return false
}

func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
    if d.aliases[n] {
        // TODO this could actually be allowed in some circumstances.
        failf("anchor '%s' value contains itself", n.value)
    }
    d.aliases[n] = true
    good = d.unmarshal(n.alias, out)
    delete(d.aliases, n)
    return good
}

var zeroValue reflect.Value

func resetMap(out reflect.Value) {
    for _, k := range out.MapKeys() {
        out.SetMapIndex(k, zeroValue)
    }
}

func (d *decoder) scalar(n *node, out reflect.Value) bool {
    var tag string
    var resolved interface{}
    if n.tag == "" && !n.implicit {
        tag = yaml_STR_TAG
        resolved = n.value
    } else {
        tag, resolved = resolve(n.tag, n.value)
        if tag == yaml_BINARY_TAG {
            data, err := base64.StdEncoding.DecodeString(resolved.(string))
            if err != nil {
                failf("!!binary value contains invalid base64 data")
            }
            resolved = string(data)
        }
    }
    if resolved == nil {
        if out.Kind() == reflect.Map && !out.CanAddr() {
            resetMap(out)
        } else {
            out.Set(reflect.Zero(out.Type()))
        }
        return true
    }
    if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
        // We've resolved to exactly the type we want, so use that.
        out.Set(resolvedv)
        return true
    }
    // Perhaps we can use the value as a TextUnmarshaler to
    // set its value.
    if out.CanAddr() {
        u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
        if ok {
            var text []byte
            if tag == yaml_BINARY_TAG {
                text = []byte(resolved.(string))
            } else {
                // We let any value be unmarshaled into TextUnmarshaler.
                // That might be more lax than we'd like, but the
                // TextUnmarshaler itself should bowl out any dubious values.
                text = []byte(n.value)
            }
            err := u.UnmarshalText(text)
            if err != nil {
                fail(err)
            }
            return true
        }
    }
    switch out.Kind() {
    case reflect.String:
        if tag == yaml_BINARY_TAG {
            out.SetString(resolved.(string))
            return true
        }
        if resolved != nil {
            out.SetString(n.value)
            return true
        }
    case reflect.Interface:
        if resolved == nil {
            out.Set(reflect.Zero(out.Type()))
        } else if tag == yaml_TIMESTAMP_TAG {
            // It looks like a timestamp but for backward compatibility
            // reasons we set it as a string, so that code that unmarshals
            // timestamp-like values into interface{} will continue to
            // see a string and not a time.Time.
            // TODO(v3) Drop this.
            out.Set(reflect.ValueOf(n.value))
        } else {
            out.Set(reflect.ValueOf(resolved))
        }
        return true
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        switch resolved := resolved.(type) {
        case int:
            if !out.OverflowInt(int64(resolved)) {
                out.SetInt(int64(resolved))
                return true
            }
        case int64:
            if !out.OverflowInt(resolved) {
                out.SetInt(resolved)
                return true
            }
        case uint64:
            if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
                out.SetInt(int64(resolved))
                return true
            }
        case float64:
            if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
                out.SetInt(int64(resolved))
                return true
            }
        case string:
            if out.Type() == durationType {
                d, err := time.ParseDuration(resolved)
                if err == nil {
                    out.SetInt(int64(d))
                    return true
                }
            }
        }
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        switch resolved := resolved.(type) {
        case int:
            if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
                out.SetUint(uint64(resolved))
                return true
            }
        case int64:
            if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
                out.SetUint(uint64(resolved))
                return true
            }
        case uint64:
            if !out.OverflowUint(uint64(resolved)) {
                out.SetUint(uint64(resolved))
                return true
            }
        case float64:
            if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
                out.SetUint(uint64(resolved))
                return true
            }
        }
    case reflect.Bool:
        switch resolved := resolved.(type) {
        case bool:
            out.SetBool(resolved)
            return true
        }
    case reflect.Float32, reflect.Float64:
        switch resolved := resolved.(type) {
        case int:
            out.SetFloat(float64(resolved))
            return true
        case int64:
            out.SetFloat(float64(resolved))
            return true
        case uint64:
            out.SetFloat(float64(resolved))
            return true
        case float64:
            out.SetFloat(resolved)
            return true
        }
    case reflect.Struct:
        if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
            out.Set(resolvedv)
            return true
        }
    case reflect.Ptr:
        if out.Type().Elem() == reflect.TypeOf(resolved) {
            // TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
            elem := reflect.New(out.Type().Elem())
            elem.Elem().Set(reflect.ValueOf(resolved))
            out.Set(elem)
            return true
        }
    }
    d.terror(n, tag, out)
    return false
}

func settableValueOf(i interface{}) reflect.Value {
    v := reflect.ValueOf(i)
    sv := reflect.New(v.Type()).Elem()
    sv.Set(v)
    return sv
}

func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
    l := len(n.children)

    var iface reflect.Value
    switch out.Kind() {
    case reflect.Slice:
        out.Set(reflect.MakeSlice(out.Type(), l, l))
    case reflect.Array:
        if l != out.Len() {
            failf("invalid array: want %d elements but got %d", out.Len(), l)
        }
    case reflect.Interface:
        // No type hints. Will have to use a generic sequence.
        iface = out
        out = settableValueOf(make([]interface{}, l))
    default:
        d.terror(n, yaml_SEQ_TAG, out)
        return false
    }
    et := out.Type().Elem()

    j := 0
    for i := 0; i < l; i++ {
        e := reflect.New(et).Elem()
        if ok := d.unmarshal(n.children[i], e); ok {
            out.Index(j).Set(e)
            j++
        }
    }
    if out.Kind() != reflect.Array {
        out.Set(out.Slice(0, j))
    }
    if iface.IsValid() {
        iface.Set(out)
    }
    return true
}

func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
    switch out.Kind() {
    case reflect.Struct:
        return d.mappingStruct(n, out)
    case reflect.Slice:
        return d.mappingSlice(n, out)
    case reflect.Map:
        // okay
    case reflect.Interface:
        if d.mapType.Kind() == reflect.Map {
            iface := out
            out = reflect.MakeMap(d.mapType)
            iface.Set(out)
        } else {
            slicev := reflect.New(d.mapType).Elem()
            if !d.mappingSlice(n, slicev) {
                return false
            }
            out.Set(slicev)
            return true
        }
    default:
        d.terror(n, yaml_MAP_TAG, out)
        return false
    }
    outt := out.Type()
    kt := outt.Key()
    et := outt.Elem()

    mapType := d.mapType
    if outt.Key() == ifaceType && outt.Elem() == ifaceType {
        d.mapType = outt
    }

    if out.IsNil() {
        out.Set(reflect.MakeMap(outt))
    }
    l := len(n.children)
    for i := 0; i < l; i += 2 {
        if isMerge(n.children[i]) {
            d.merge(n.children[i+1], out)
            continue
        }
        k := reflect.New(kt).Elem()
        if d.unmarshal(n.children[i], k) {
            kkind := k.Kind()
            if kkind == reflect.Interface {
                kkind = k.Elem().Kind()
            }
            if kkind == reflect.Map || kkind == reflect.Slice {
                failf("invalid map key: %#v", k.Interface())
            }
            e := reflect.New(et).Elem()
            if d.unmarshal(n.children[i+1], e) {
                d.setMapIndex(n.children[i+1], out, k, e)
            }
        }
    }
    d.mapType = mapType
    return true
}

func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
    if d.strict && out.MapIndex(k) != zeroValue {
        d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
        return
    }
    out.SetMapIndex(k, v)
}

func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
    outt := out.Type()
    if outt.Elem() != mapItemType {
        d.terror(n, yaml_MAP_TAG, out)
        return false
    }

    mapType := d.mapType
    d.mapType = outt

    var slice []MapItem
    var l = len(n.children)
    for i := 0; i < l; i += 2 {
        if isMerge(n.children[i]) {
            d.merge(n.children[i+1], out)
            continue
        }
        item := MapItem{}
        k := reflect.ValueOf(&item.Key).Elem()
        if d.unmarshal(n.children[i], k) {
            v := reflect.ValueOf(&item.Value).Elem()
            if d.unmarshal(n.children[i+1], v) {
                slice = append(slice, item)
            }
        }
    }
    out.Set(reflect.ValueOf(slice))
    d.mapType = mapType
    return true
}

func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
    sinfo, err := getStructInfo(out.Type())
    if err != nil {
        panic(err)
    }
    name := settableValueOf("")
    l := len(n.children)

    var inlineMap reflect.Value
    var elemType reflect.Type
    if sinfo.InlineMap != -1 {
        inlineMap = out.Field(sinfo.InlineMap)
        inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
        elemType = inlineMap.Type().Elem()
    }

    var doneFields []bool
    if d.strict {
        doneFields = make([]bool, len(sinfo.FieldsList))
    }
    for i := 0; i < l; i += 2 {
        ni := n.children[i]
        if isMerge(ni) {
            d.merge(n.children[i+1], out)
            continue
        }
        if !d.unmarshal(ni, name) {
            continue
        }
        if info, ok := sinfo.FieldsMap[name.String()]; ok {
            if d.strict {
                if doneFields[info.Id] {
                    d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
                    continue
                }
                doneFields[info.Id] = true
            }
            var field reflect.Value
            if info.Inline == nil {
                field = out.Field(info.Num)
            } else {
                field = out.FieldByIndex(info.Inline)
            }
            d.unmarshal(n.children[i+1], field)
        } else if sinfo.InlineMap != -1 {
            if inlineMap.IsNil() {
                inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
            }
            value := reflect.New(elemType).Elem()
            d.unmarshal(n.children[i+1], value)
            d.setMapIndex(n.children[i+1], inlineMap, name, value)
        } else if d.strict {
            d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
        }
    }
    return true
}

func failWantMap() {
    failf("map merge requires map or sequence of maps as the value")
}

func (d *decoder) merge(n *node, out reflect.Value) {
    switch n.kind {
    case mappingNode:
        d.unmarshal(n, out)
    case aliasNode:
        an, ok := d.doc.anchors[n.value]
        if ok && an.kind != mappingNode {
            failWantMap()
        }
        d.unmarshal(n, out)
    case sequenceNode:
        // Step backwards as earlier nodes take precedence.
        for i := len(n.children) - 1; i >= 0; i-- {
            ni := n.children[i]
            if ni.kind == aliasNode {
                an, ok := d.doc.anchors[ni.value]
                if ok && an.kind != mappingNode {
                    failWantMap()
                }
            } else if ni.kind != mappingNode {
                failWantMap()
            }
            d.unmarshal(ni, out)
        }
    default:
        failWantMap()
    }
}

func isMerge(n *node) bool {
    return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
}
File diff suppressed because it is too large
@ -1,362 +0,0 @@
|
|||
package yaml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
emitter yaml_emitter_t
|
||||
event yaml_event_t
|
||||
out []byte
|
||||
flow bool
|
||||
// doneInit holds whether the initial stream_start_event has been
|
||||
// emitted.
|
||||
doneInit bool
|
||||
}
|
||||
|
||||
func newEncoder() *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func newEncoderWithWriter(w io.Writer) *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_writer(&e.emitter, w)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *encoder) init() {
|
||||
if e.doneInit {
|
||||
return
|
||||
}
|
||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
||||
e.emit()
|
||||
e.doneInit = true
|
||||
}
|
||||
|
||||
func (e *encoder) finish() {
|
||||
e.emitter.open_ended = false
|
||||
yaml_stream_end_event_initialize(&e.event)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) destroy() {
|
||||
yaml_emitter_delete(&e.emitter)
|
||||
}
|
||||
|
||||
func (e *encoder) emit() {
|
||||
// This will internally delete the e.event value.
|
||||
e.must(yaml_emitter_emit(&e.emitter, &e.event))
|
||||
}
|
||||
|
||||
func (e *encoder) must(ok bool) {
|
||||
if !ok {
|
||||
msg := e.emitter.problem
|
||||
if msg == "" {
|
||||
msg = "unknown problem generating YAML content"
|
||||
}
|
||||
failf("%s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
|
||||
e.init()
|
||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
||||
e.emit()
|
||||
e.marshal(tag, in)
|
||||
yaml_document_end_event_initialize(&e.event, true)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
iface := in.Interface()
|
||||
switch m := iface.(type) {
|
||||
case time.Time, *time.Time:
|
||||
// Although time.Time implements TextMarshaler,
|
||||
// we don't want to treat it as a string for YAML
|
||||
// purposes because YAML has special support for
|
||||
// timestamps.
|
||||
case Marshaler:
|
||||
v, err := m.MarshalYAML()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
if v == nil {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
in = reflect.ValueOf(v)
|
||||
case encoding.TextMarshaler:
|
||||
text, err := m.MarshalText()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
in = reflect.ValueOf(string(text))
|
||||
case nil:
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Interface:
|
||||
e.marshal(tag, in.Elem())
|
||||
case reflect.Map:
|
||||
e.mapv(tag, in)
|
||||
case reflect.Ptr:
|
||||
if in.Type() == ptrTimeType {
|
||||
e.timev(tag, in.Elem())
|
||||
} else {
|
||||
e.marshal(tag, in.Elem())
|
||||
}
|
||||
case reflect.Struct:
|
||||
if in.Type() == timeType {
|
||||
e.timev(tag, in)
|
||||
} else {
|
||||
e.structv(tag, in)
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if in.Type().Elem() == mapItemType {
|
||||
e.itemsv(tag, in)
|
||||
} else {
|
||||
e.slicev(tag, in)
|
||||
}
|
||||
case reflect.String:
|
||||
e.stringv(tag, in)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if in.Type() == durationType {
|
||||
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
|
||||
} else {
|
||||
e.intv(tag, in)
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
e.uintv(tag, in)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
e.floatv(tag, in)
|
||||
case reflect.Bool:
|
||||
e.boolv(tag, in)
|
||||
default:
|
||||
panic("cannot marshal type: " + in.Type().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) mapv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
keys := keyList(in.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
e.marshal("", k)
|
||||
e.marshal("", in.MapIndex(k))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) itemsv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
|
||||
for _, item := range slice {
|
||||
e.marshal("", reflect.ValueOf(item.Key))
|
||||
e.marshal("", reflect.ValueOf(item.Value))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) structv(tag string, in reflect.Value) {
|
||||
sinfo, err := getStructInfo(in.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
e.mappingv(tag, func() {
|
||||
for _, info := range sinfo.FieldsList {
|
||||
var value reflect.Value
|
||||
if info.Inline == nil {
|
||||
value = in.Field(info.Num)
|
||||
} else {
|
||||
value = in.FieldByIndex(info.Inline)
|
||||
}
|
||||
if info.OmitEmpty && isZero(value) {
|
||||
continue
|
||||
}
|
||||
e.marshal("", reflect.ValueOf(info.Key))
|
||||
e.flow = info.Flow
|
||||
e.marshal("", value)
|
||||
}
|
||||
if sinfo.InlineMap >= 0 {
|
||||
m := in.Field(sinfo.InlineMap)
|
||||
if m.Len() > 0 {
|
||||
e.flow = false
|
||||
keys := keyList(m.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
if _, found := sinfo.FieldsMap[k.String()]; found {
|
||||
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
|
||||
}
|
||||
e.marshal("", k)
|
||||
e.flow = false
|
||||
e.marshal("", m.MapIndex(k))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) mappingv(tag string, f func()) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_MAPPING_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_MAPPING_STYLE
|
||||
}
|
||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
||||
e.emit()
|
||||
f()
|
||||
yaml_mapping_end_event_initialize(&e.event)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_SEQUENCE_STYLE
|
||||
}
|
||||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||
e.emit()
|
||||
n := in.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
e.marshal("", in.Index(i))
|
||||
}
|
||||
e.must(yaml_sequence_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||||
//
|
||||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||||
// the time being for compatibility with other parsers.
|
||||
func isBase60Float(s string) (result bool) {
|
||||
// Fast path.
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
c := s[0]
|
||||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||||
return false
|
||||
}
|
||||
// Do the full match.
|
||||
return base60float.MatchString(s)
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/float.html, except the regular expression there
|
||||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||||
|
||||
func (e *encoder) stringv(tag string, in reflect.Value) {
|
||||
var style yaml_scalar_style_t
|
||||
s := in.String()
|
||||
canUsePlain := true
|
||||
switch {
|
||||
case !utf8.ValidString(s):
|
||||
if tag == yaml_BINARY_TAG {
|
||||
failf("explicitly tagged !!binary data must be base64-encoded")
|
||||
}
|
||||
if tag != "" {
|
||||
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
|
||||
}
|
||||
// It can't be encoded directly as YAML so use a binary tag
|
||||
// and encode it as base64.
|
||||
tag = yaml_BINARY_TAG
|
||||
s = encodeBase64(s)
|
||||
case tag == "":
|
||||
// Check to see if it would resolve to a specific
|
||||
// tag when encoded unquoted. If it doesn't,
|
||||
// there's no need to quote it.
|
||||
rtag, _ := resolve("", s)
|
||||
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
|
||||
}
|
||||
// Note: it's possible for user code to emit invalid YAML
|
||||
// if they explicitly specify a tag and a string containing
|
||||
// text that's incompatible with that tag.
|
||||
switch {
|
||||
case strings.Contains(s, "\n"):
|
||||
style = yaml_LITERAL_SCALAR_STYLE
|
||||
case canUsePlain:
|
||||
style = yaml_PLAIN_SCALAR_STYLE
|
||||
default:
|
||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||
}
|
||||
e.emitScalar(s, "", tag, style)
|
||||
}

func (e *encoder) boolv(tag string, in reflect.Value) {
	var s string
	if in.Bool() {
		s = "true"
	} else {
		s = "false"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) intv(tag string, in reflect.Value) {
	s := strconv.FormatInt(in.Int(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) uintv(tag string, in reflect.Value) {
	s := strconv.FormatUint(in.Uint(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) timev(tag string, in reflect.Value) {
	t := in.Interface().(time.Time)
	s := t.Format(time.RFC3339Nano)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) floatv(tag string, in reflect.Value) {
	// Issue #352: When formatting, use the precision of the underlying value
	precision := 64
	if in.Kind() == reflect.Float32 {
		precision = 32
	}

	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
	switch s {
	case "+Inf":
		s = ".inf"
	case "-Inf":
		s = "-.inf"
	case "NaN":
		s = ".nan"
	}
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
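
// exampleFloatScalars is an illustrative sketch added for this review, not
// part of the original file: it pairs the strings strconv.FormatFloat
// produces for Go float specials with the YAML scalars floatv emits.
func exampleFloatScalars() map[string]string {
	return map[string]string{
		"+Inf": ".inf",  // math.Inf(1)
		"-Inf": "-.inf", // math.Inf(-1)
		"NaN":  ".nan",  // math.NaN()
		"0.1":  "0.1",   // ordinary values pass through unchanged
	}
}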

func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	e.emit()
}

File diff suppressed because it is too large

@ -1,412 +0,0 @@
package yaml

import (
	"io"
)

// Set the reader error and return false.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}

// Byte order marks.
const (
	bom_UTF8    = "\xef\xbb\xbf"
	bom_UTF16LE = "\xff\xfe"
	bom_UTF16BE = "\xfe\xff"
)

// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Ensure that we had enough bytes in the raw buffer.
	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}

	// Determine the encoding.
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	} else {
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}
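
// exampleDetectBOM is an illustrative sketch added for this review, not part
// of the original file. It restates the BOM checks above as a standalone
// helper: a UTF-16 BOM is two bytes, a UTF-8 BOM is three, and anything else
// defaults to UTF-8 with no bytes consumed.
func exampleDetectBOM(b []byte) (encoding yaml_encoding_t, skip int) {
	switch {
	case len(b) >= 2 && b[0] == 0xff && b[1] == 0xfe:
		return yaml_UTF16LE_ENCODING, 2
	case len(b) >= 2 && b[0] == 0xfe && b[1] == 0xff:
		return yaml_UTF16BE_ENCODING, 2
	case len(b) >= 3 && b[0] == 0xef && b[1] == 0xbb && b[2] == 0xbf:
		return yaml_UTF8_ENCODING, 3
	default:
		return yaml_UTF8_ENCODING, 0
	}
}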

// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0

	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}

	// Return on EOF.
	if parser.eof {
		return true
	}

	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0

	// Call the read handler to fill the buffer.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}

// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
	if parser.read_handler == nil {
		panic("read handler must be set")
	}

	// [Go] This function was changed to guarantee the requested length size at EOF.
	// The fact we need to do this is pretty awful, but the description above implies
	// that this must be the case, and there are tests that rely on it.

	// If the EOF flag is set and the raw buffer is empty, do nothing.
	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
		// [Go] ACTUALLY! Read the documentation of this function above.
		// This is just broken. To return true, we need to have the
		// given length in the buffer. Not doing that means every single
		// check that calls this function to make sure the buffer has a
		// given length is either panicking (in Go) or accessing invalid
		// memory (in C).
		//return true
	}

	// Return if the buffer contains enough characters.
	if parser.unread >= length {
		return true
	}

	// Determine the input encoding if it is not known yet.
	if parser.encoding == yaml_ANY_ENCODING {
		if !yaml_parser_determine_encoding(parser) {
			return false
		}
	}

	// Move the unread characters to the beginning of the buffer.
	buffer_len := len(parser.buffer)
	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
		buffer_len -= parser.buffer_pos
		parser.buffer_pos = 0
	} else if parser.buffer_pos == buffer_len {
		buffer_len = 0
		parser.buffer_pos = 0
	}

	// Open the whole buffer for writing, and cut it before returning.
	parser.buffer = parser.buffer[:cap(parser.buffer)]

	// Fill the buffer until it has enough characters.
	first := true
	for parser.unread < length {

		// Fill the raw buffer if necessary.
		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
			if !yaml_parser_update_raw_buffer(parser) {
				parser.buffer = parser.buffer[:buffer_len]
				return false
			}
		}
		first = false

		// Decode the raw buffer.
	inner:
		for parser.raw_buffer_pos != len(parser.raw_buffer) {
			var value rune
			var width int

			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos

			// Decode the next character.
			switch parser.encoding {
			case yaml_UTF8_ENCODING:
				// Decode a UTF-8 character. Check RFC 3629
				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
				//
				// The following table (taken from the RFC) is used for
				// decoding.
				//
				//    Char. number range |        UTF-8 octet sequence
				//      (hexadecimal)    |              (binary)
				//   --------------------+------------------------------------
				//   0000 0000-0000 007F | 0xxxxxxx
				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
				//
				// Additionally, the characters in the range 0xD800-0xDFFF
				// are prohibited as they are reserved for use with UTF-16
				// surrogate pairs.

				// Determine the length of the UTF-8 sequence.
				octet := parser.raw_buffer[parser.raw_buffer_pos]
				switch {
				case octet&0x80 == 0x00:
					width = 1
				case octet&0xE0 == 0xC0:
					width = 2
				case octet&0xF0 == 0xE0:
					width = 3
				case octet&0xF8 == 0xF0:
					width = 4
				default:
					// The leading octet is invalid.
					return yaml_parser_set_reader_error(parser,
						"invalid leading UTF-8 octet",
						parser.offset, int(octet))
				}

				// Check if the raw buffer contains an incomplete character.
				if width > raw_unread {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-8 octet sequence",
							parser.offset, -1)
					}
					break inner
				}

				// Decode the leading octet.
				switch {
				case octet&0x80 == 0x00:
					value = rune(octet & 0x7F)
				case octet&0xE0 == 0xC0:
					value = rune(octet & 0x1F)
				case octet&0xF0 == 0xE0:
					value = rune(octet & 0x0F)
				case octet&0xF8 == 0xF0:
					value = rune(octet & 0x07)
				default:
					value = 0
				}
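
				// Worked example (added for this review, not in the original
				// file): for the three-byte sequence E2 82 AC ("€"),
				// width = 3 because 0xE2&0xF0 == 0xE0, and the leading octet
				// contributes 0xE2&0x0F = 0x2. The trailing-octet loop below
				// then folds in 0x82&0x3F = 0x02 and 0xAC&0x3F = 0x2C:
				// ((0x2<<6)+0x02)<<6 + 0x2C = 0x20AC = U+20AC.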

				// Check and decode the trailing octets.
				for k := 1; k < width; k++ {
					octet = parser.raw_buffer[parser.raw_buffer_pos+k]

					// Check if the octet is valid.
					if (octet & 0xC0) != 0x80 {
						return yaml_parser_set_reader_error(parser,
							"invalid trailing UTF-8 octet",
							parser.offset+k, int(octet))
					}

					// Decode the octet.
					value = (value << 6) + rune(octet&0x3F)
				}

				// Check the length of the sequence against the value.
				switch {
				case width == 1:
				case width == 2 && value >= 0x80:
				case width == 3 && value >= 0x800:
				case width == 4 && value >= 0x10000:
				default:
					return yaml_parser_set_reader_error(parser,
						"invalid length of a UTF-8 sequence",
						parser.offset, -1)
				}

				// Check the range of the value.
				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
					return yaml_parser_set_reader_error(parser,
						"invalid Unicode character",
						parser.offset, int(value))
				}

			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
				var low, high int
				if parser.encoding == yaml_UTF16LE_ENCODING {
					low, high = 0, 1
				} else {
					low, high = 1, 0
				}

				// The UTF-16 encoding is not as simple as one might
				// naively think. Check RFC 2781
				// (http://www.ietf.org/rfc/rfc2781.txt).
				//
				// Normally, two subsequent bytes describe a Unicode
				// character. However a special technique (called a
				// surrogate pair) is used for specifying character
				// values larger than 0xFFFF.
				//
				// A surrogate pair consists of two pseudo-characters:
				//    high surrogate area (0xD800-0xDBFF)
				//    low surrogate area (0xDC00-0xDFFF)
				//
				// The following formulas are used for decoding
				// and encoding characters using surrogate pairs:
				//
				//    U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
				//    U' = yyyyyyyyyy xxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
				//    W1 = 110110yyyyyyyyyy
				//    W2 = 110111xxxxxxxxxx
				//
				// where U is the character value, W1 is the high surrogate
				// area, W2 is the low surrogate area.

				// Check for incomplete UTF-16 character.
				if raw_unread < 2 {
					if parser.eof {
						return yaml_parser_set_reader_error(parser,
							"incomplete UTF-16 character",
							parser.offset, -1)
					}
					break inner
				}

				// Get the character.
				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)

				// Check for unexpected low surrogate area.
				if value&0xFC00 == 0xDC00 {
					return yaml_parser_set_reader_error(parser,
						"unexpected low surrogate area",
						parser.offset, int(value))
				}

				// Check for a high surrogate area.
				if value&0xFC00 == 0xD800 {
					width = 4

					// Check for incomplete surrogate pair.
					if raw_unread < 4 {
						if parser.eof {
							return yaml_parser_set_reader_error(parser,
								"incomplete UTF-16 surrogate pair",
								parser.offset, -1)
						}
						break inner
					}

					// Get the next character.
					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)

					// Check for a low surrogate area.
					if value2&0xFC00 != 0xDC00 {
						return yaml_parser_set_reader_error(parser,
							"expected low surrogate area",
							parser.offset+2, int(value2))
					}

					// Generate the value of the surrogate pair.
					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
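					// Worked example (added for this review, not in the
					// original file): for U+1F600, W1 = 0xD83D and
					// W2 = 0xDE00, so value = 0x10000 + (0x03D << 10) +
					// 0x200 = 0x1F600.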
				} else {
					width = 2
				}

			default:
				panic("impossible")
			}

			// Check if the character is in the allowed range:
			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
			//      | [#x10000-#x10FFFF]                        (32 bit)
			switch {
			case value == 0x09:
			case value == 0x0A:
			case value == 0x0D:
			case value >= 0x20 && value <= 0x7E:
			case value == 0x85:
			case value >= 0xA0 && value <= 0xD7FF:
			case value >= 0xE000 && value <= 0xFFFD:
			case value >= 0x10000 && value <= 0x10FFFF:
			default:
				return yaml_parser_set_reader_error(parser,
					"control characters are not allowed",
					parser.offset, int(value))
			}

			// Move the raw pointers.
			parser.raw_buffer_pos += width
			parser.offset += width

			// Finally put the character into the buffer.
			if value <= 0x7F {
				// 0000 0000-0000 007F . 0xxxxxxx
				parser.buffer[buffer_len+0] = byte(value)
				buffer_len += 1
			} else if value <= 0x7FF {
				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
				buffer_len += 2
			} else if value <= 0xFFFF {
				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
				buffer_len += 3
			} else {
				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
				buffer_len += 4
			}

			parser.unread++
		}

		// On EOF, put NUL into the buffer and return.
		if parser.eof {
			parser.buffer[buffer_len] = 0
			buffer_len++
			parser.unread++
			break
		}
	}
	// [Go] Read the documentation of this function above. To return true,
	// we need to have the given length in the buffer. Not doing that means
	// every single check that calls this function to make sure the buffer
	// has a given length is either panicking (in Go) or accessing invalid
	// memory (in C). This happens here due to the EOF above breaking early.
	for buffer_len < length {
		parser.buffer[buffer_len] = 0
		buffer_len++
	}
	parser.buffer = parser.buffer[:buffer_len]
	return true
}
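
For a quick cross-check of the surrogate-pair arithmetic above, here is a minimal standalone sketch (added for this review; it assumes only the Go standard library, not anything from this package):

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// The same formula as the reader: U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).
	manual := rune(0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF))
	stdlib := utf16.DecodeRune(0xD83D, 0xDE00)
	fmt.Printf("%U %U\n", manual, stdlib) // U+1F600 U+1F600
}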

Some files were not shown because too many files have changed in this diff