package storage

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"

	drivers "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/ioutils"
	"github.com/containers/storage/pkg/stringid"
	"github.com/containers/storage/pkg/truncindex"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

const (
	tarSplitSuffix  = ".tar-split.gz"
	incompleteFlag  = "incomplete"
	compressionFlag = "diff-compression"
)

// A Layer is a record of a copy-on-write layer that's stored by the lower
// level graph driver.
type Layer struct {
	// ID is either one which was specified at create-time, or a random
	// value which was generated by the library.
	ID string `json:"id"`

	// Names is an optional set of user-defined convenience values.  The
	// layer can be referred to by its ID or any of its names.  Names are
	// unique among layers.
	Names []string `json:"names,omitempty"`

	// Parent is the ID of a layer from which this layer inherits data.
	Parent string `json:"parent,omitempty"`

	// Metadata is data we keep for the convenience of the caller.  It is not
	// expected to be large, since it is kept in memory.
	Metadata string `json:"metadata,omitempty"`

	// MountLabel is an SELinux label which should be used when attempting to mount
	// the layer.
	MountLabel string `json:"mountlabel,omitempty"`

	// MountPoint is the path where the layer is mounted, or where it was most
	// recently mounted.  This can change between subsequent Unmount() and
	// Mount() calls, so the caller should consult this value after Mount()
	// succeeds to find the location of the container's root filesystem.
	MountPoint string `json:"-"`

	// MountCount is used as a reference count for the container's layer being
	// mounted at the mount point.
	MountCount int `json:"-"`

	// Created is the datestamp for when this layer was created.  Older
	// versions of the library did not track this information, so callers
	// will likely want to use the IsZero() method to verify that a value
	// is set before using it.
	Created time.Time `json:"created,omitempty"`

	// CompressedDigest is the digest of the blob that was last passed to
	// ApplyDiff() or Put(), as it was presented to us.
	CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"`

	// CompressedSize is the length of the blob that was last passed to
	// ApplyDiff() or Put(), as it was presented to us.  If
	// CompressedDigest is not set, this should be treated as if it were an
	// uninitialized value.
	CompressedSize int64 `json:"compressed-size,omitempty"`

	// UncompressedDigest is the digest of the blob that was last passed to
	// ApplyDiff() or Put(), after we decompressed it.  Often referred to
	// as a DiffID.
	UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`

	// UncompressedSize is the length of the blob that was last passed to
	// ApplyDiff() or Put(), after we decompressed it.  If
	// UncompressedDigest is not set, this should be treated as if it were
	// an uninitialized value.
	UncompressedSize int64 `json:"diff-size,omitempty"`

	// CompressionType is the type of compression which we detected on the blob
	// that was last passed to ApplyDiff() or Put().
	CompressionType archive.Compression `json:"compression,omitempty"`

	// Flags is arbitrary data about the layer.
	Flags map[string]interface{} `json:"flags,omitempty"`
}

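// The json struct tags above control how a Layer record is written to
// layers.json by Save (defined later in this file).  The sketch below is
// illustrative only and not part of the original file: it marshals a
// hand-built Layer with hypothetical values to show which fields are
// persisted and which are deliberately skipped.
func exampleLayerJSON() ([]byte, error) {
	layer := Layer{
		ID:      "0123456789abcdef",
		Names:   []string{"example-layer"},
		Created: time.Now().UTC(),
		// MountPoint and MountCount carry `json:"-"` tags, so they are
		// omitted here and tracked separately in mountpoints.json.
		MountPoint: "/var/lib/example/mounts/0123456789abcdef",
		MountCount: 1,
	}
	return json.Marshal(&layer)
}
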
type layerMountPoint struct {
	ID         string `json:"id"`
	MountPoint string `json:"path"`
	MountCount int    `json:"count"`
}

// DiffOptions overrides the default behavior of Diff() methods.
type DiffOptions struct {
	// Compression, if set, overrides the default compressor used when generating a diff.
	Compression *archive.Compression
}

// ROLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
type ROLayerStore interface {
	ROFileBasedStore
	ROMetadataStore

	// Exists checks if a layer with the specified name or ID is known.
	Exists(id string) bool

	// Get retrieves information about a layer given an ID or name.
	Get(id string) (*Layer, error)

	// Status returns a slice of key-value pairs, suitable for human consumption,
	// relaying whatever status information the underlying driver can share.
	Status() ([][2]string, error)

	// Changes returns a slice of Change structures, which contain a pathname
	// (Path) and a description of what sort of change (Kind) was made by the
	// layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a
	// specified layer.  By default, the layer's parent is used as a reference.
	Changes(from, to string) ([]archive.Change, error)

	// Diff produces a tarstream which can be applied to a layer with the contents
	// of the first layer to produce a layer with the contents of the second layer.
	// By default, the parent of the second layer is used as the first
	// layer, so it need not be specified.  Options can be used to override
	// default behavior, but are also not required.
	Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)

	// DiffSize produces an estimate of the length of the tarstream which would be
	// produced by Diff.
	DiffSize(from, to string) (int64, error)

	// Size produces a cached value for the uncompressed size of the layer,
	// if one is known, or -1 if it is not known.  If the layer can not be
	// found, it returns an error.
	Size(name string) (int64, error)

	// Lookup attempts to translate a name to an ID.  Most methods do this
	// implicitly.
	Lookup(name string) (string, error)

	// LayersByCompressedDigest returns a slice of the layers with the
	// specified compressed digest value recorded for them.
	LayersByCompressedDigest(d digest.Digest) ([]Layer, error)

	// LayersByUncompressedDigest returns a slice of the layers with the
	// specified uncompressed digest value recorded for them.
	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)

	// Layers returns a slice of the known layers.
	Layers() ([]Layer, error)
}

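// Illustrative sketch (not part of the original file): a few read-only
// queries against an ROLayerStore.  The store value is assumed to come from
// newROLayerStore (defined later in this file), and the layer name
// "example-layer" is hypothetical.
func exampleReadOnlyQueries(ro ROLayerStore) (*Layer, error) {
	if !ro.Exists("example-layer") {
		return nil, ErrLayerUnknown
	}
	layer, err := ro.Get("example-layer")
	if err != nil {
		return nil, err
	}
	// Size returns the cached uncompressed size, or -1 if it was never
	// recorded for this layer.
	if _, err := ro.Size(layer.ID); err != nil {
		return nil, err
	}
	return layer, nil
}
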
// LayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
type LayerStore interface {
	ROLayerStore
	RWFileBasedStore
	RWMetadataStore
	FlaggableStore

	// Create creates a new layer, optionally giving it a specified ID rather than
	// a randomly-generated one, either inheriting data from another specified
	// layer or the empty base layer.  The new layer can optionally be given names
	// and have an SELinux label specified for use when mounting it.  Some
	// underlying drivers can accept a "size" option.  At this time, most
	// underlying drivers do not themselves distinguish between writeable
	// and read-only layers.
	Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error)

	// CreateWithFlags combines the functions of Create and SetFlag.
	CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error)

	// Put combines the functions of CreateWithFlags and ApplyDiff.
	Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error)

	// SetNames replaces the list of names associated with a layer with the
	// supplied values.
	SetNames(id string, names []string) error

	// Delete deletes a layer with the specified name or ID.
	Delete(id string) error

	// Wipe deletes all layers.
	Wipe() error

	// Mount mounts a layer for use.  If the specified layer is the parent of other
	// layers, it should not be written to.  An SELinux label to be applied to the
	// mount can be specified to override the one configured for the layer.
	Mount(id, mountLabel string) (string, error)

	// Unmount unmounts a layer when it is no longer in use.
	Unmount(id string) error

	// ApplyDiff reads a tarstream which was created by a previous call to Diff and
	// applies its changes to a specified layer.
	ApplyDiff(to string, diff io.Reader) (int64, error)
}

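// Illustrative sketch (not part of the original file): the basic lifecycle a
// caller drives through the read-write LayerStore interface.  Locking via the
// embedded file-based store interfaces is omitted for brevity, and the layer
// name used here is hypothetical.
func exampleLayerLifecycle(rw LayerStore) error {
	// Create a writeable layer on top of the empty base layer, letting the
	// library pick a random ID.
	layer, err := rw.Create("", "", []string{"example-working-layer"}, "", nil, true)
	if err != nil {
		return err
	}
	// Mount it, use the returned path as the root filesystem, then unmount.
	if _, err := rw.Mount(layer.ID, ""); err != nil {
		return err
	}
	if err := rw.Unmount(layer.ID); err != nil {
		return err
	}
	// Delete removes the record and asks the driver to remove the layer's data.
	return rw.Delete(layer.ID)
}
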
type layerStore struct {
	lockfile          Locker
	rundir            string
	driver            drivers.Driver
	layerdir          string
	layers            []*Layer
	idindex           *truncindex.TruncIndex
	byid              map[string]*Layer
	byname            map[string]*Layer
	bymount           map[string]*Layer
	bycompressedsum   map[digest.Digest][]string
	byuncompressedsum map[digest.Digest][]string
}

func (r *layerStore) Layers() ([]Layer, error) {
	layers := make([]Layer, len(r.layers))
	for i := range r.layers {
		layers[i] = *(r.layers[i])
	}
	return layers, nil
}

func (r *layerStore) mountspath() string {
	return filepath.Join(r.rundir, "mountpoints.json")
}

func (r *layerStore) layerspath() string {
	return filepath.Join(r.layerdir, "layers.json")
}

// Load reads the layer records from layers.json and the active mounts from
// mountpoints.json, rebuilds the in-memory indexes, and, if the store is
// writable, deletes any layers that were left marked incomplete.
func (r *layerStore) Load() error {
	shouldSave := false
	rpath := r.layerspath()
	data, err := ioutil.ReadFile(rpath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	layers := []*Layer{}
	idlist := []string{}
	ids := make(map[string]*Layer)
	names := make(map[string]*Layer)
	mounts := make(map[string]*Layer)
	compressedsums := make(map[digest.Digest][]string)
	uncompressedsums := make(map[digest.Digest][]string)
	if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil {
		idlist = make([]string, 0, len(layers))
		for n, layer := range layers {
			ids[layer.ID] = layers[n]
			idlist = append(idlist, layer.ID)
			for _, name := range layer.Names {
				if conflict, ok := names[name]; ok {
					r.removeName(conflict, name)
					shouldSave = true
				}
				names[name] = layers[n]
			}
			if layer.CompressedDigest != "" {
				compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID)
			}
			if layer.UncompressedDigest != "" {
				uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
			}
		}
	}
	if shouldSave && !r.IsReadWrite() {
		return ErrDuplicateLayerNames
	}
	mpath := r.mountspath()
	data, err = ioutil.ReadFile(mpath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	layerMounts := []layerMountPoint{}
	if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
		for _, mount := range layerMounts {
			if mount.MountPoint != "" {
				if layer, ok := ids[mount.ID]; ok {
					mounts[mount.MountPoint] = layer
					layer.MountPoint = mount.MountPoint
					layer.MountCount = mount.MountCount
				}
			}
		}
	}
	r.layers = layers
	r.idindex = truncindex.NewTruncIndex(idlist)
	r.byid = ids
	r.byname = names
	r.bymount = mounts
	r.bycompressedsum = compressedsums
	r.byuncompressedsum = uncompressedsums
	err = nil
	// Last step: if we're writable, try to remove anything that a previous
	// user of this storage area marked for deletion but didn't manage to
	// actually delete.
	if r.IsReadWrite() {
		for _, layer := range r.layers {
			if layer.Flags == nil {
				layer.Flags = make(map[string]interface{})
			}
			if cleanup, ok := layer.Flags[incompleteFlag]; ok {
				if b, ok := cleanup.(bool); ok && b {
					err = r.Delete(layer.ID)
					if err != nil {
						break
					}
					shouldSave = true
				}
			}
		}
		if shouldSave {
			return r.Save()
		}
	}
	return err
}

// Save atomically writes the layer records to layers.json and the current
// mount state to mountpoints.json.
func (r *layerStore) Save() error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
	}
	rpath := r.layerspath()
	if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
		return err
	}
	jldata, err := json.Marshal(&r.layers)
	if err != nil {
		return err
	}
	mpath := r.mountspath()
	if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
		return err
	}
	mounts := make([]layerMountPoint, 0, len(r.layers))
	for _, layer := range r.layers {
		if layer.MountPoint != "" && layer.MountCount > 0 {
			mounts = append(mounts, layerMountPoint{
				ID:         layer.ID,
				MountPoint: layer.MountPoint,
				MountCount: layer.MountCount,
			})
		}
	}
	jmdata, err := json.Marshal(&mounts)
	if err != nil {
		return err
	}
	if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
		return err
	}
	defer r.Touch()
	return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
}

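// Illustrative sketch (not part of the original file): the shape of the
// mountpoints.json data that Save writes and Load reads back.  The sample
// document and its values are hypothetical.
func exampleDecodeMountpoints() ([]layerMountPoint, error) {
	sample := []byte(`[{"id":"0123456789abcdef","path":"/var/lib/example/mounts/0123456789abcdef","count":1}]`)
	var mounts []layerMountPoint
	if err := json.Unmarshal(sample, &mounts); err != nil {
		return nil, err
	}
	return mounts, nil
}
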
func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) {
	if err := os.MkdirAll(rundir, 0700); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(layerdir, 0700); err != nil {
		return nil, err
	}
	lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock"))
	if err != nil {
		return nil, err
	}
	lockfile.Lock()
	defer lockfile.Unlock()
	rlstore := layerStore{
		lockfile: lockfile,
		driver:   driver,
		rundir:   rundir,
		layerdir: layerdir,
		byid:     make(map[string]*Layer),
		bymount:  make(map[string]*Layer),
		byname:   make(map[string]*Layer),
	}
	if err := rlstore.Load(); err != nil {
		return nil, err
	}
	return &rlstore, nil
}

func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) {
	lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock"))
	if err != nil {
		return nil, err
	}
	lockfile.Lock()
	defer lockfile.Unlock()
	rlstore := layerStore{
		lockfile: lockfile,
		driver:   driver,
		rundir:   rundir,
		layerdir: layerdir,
		byid:     make(map[string]*Layer),
		bymount:  make(map[string]*Layer),
		byname:   make(map[string]*Layer),
	}
	if err := rlstore.Load(); err != nil {
		return nil, err
	}
	return &rlstore, nil
}

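// Illustrative sketch (not part of the original file): obtaining a read-write
// and a read-only view of the same layer store through the constructors
// above.  The directories and the driver value are assumptions supplied by
// the caller.
func exampleOpenStores(driver drivers.Driver) (LayerStore, ROLayerStore, error) {
	rw, err := newLayerStore("/run/example/storage", "/var/lib/example/storage", driver)
	if err != nil {
		return nil, nil, err
	}
	ro, err := newROLayerStore("/run/example/storage", "/var/lib/example/storage", driver)
	if err != nil {
		return nil, nil, err
	}
	return rw, ro, nil
}
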
// lookup resolves an ID, a name, or an unambiguous ID prefix to a layer record.
func (r *layerStore) lookup(id string) (*Layer, bool) {
	if layer, ok := r.byid[id]; ok {
		return layer, ok
	} else if layer, ok := r.byname[id]; ok {
		return layer, ok
	} else if longid, err := r.idindex.Get(id); err == nil {
		layer, ok := r.byid[longid]
		return layer, ok
	}
	return nil, false
}

func (r *layerStore) Size(name string) (int64, error) {
	layer, ok := r.lookup(name)
	if !ok {
		return -1, ErrLayerUnknown
	}
	// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
	// a zero value is not just present because it was never set to anything else (which can happen if the layer was
	// created by a version of this library that didn't keep track of digest and size information).
	if layer.UncompressedDigest != "" {
		return layer.UncompressedSize, nil
	}
	return -1, nil
}

func (r *layerStore) ClearFlag(id string, flag string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	delete(layer.Flags, flag)
	return r.Save()
}

func (r *layerStore) SetFlag(id string, flag string, value interface{}) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	if layer.Flags == nil {
		layer.Flags = make(map[string]interface{})
	}
	layer.Flags[flag] = value
	return r.Save()
}

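// Illustrative sketch (not part of the original file): flags are arbitrary
// per-layer annotations persisted with the layer record.  Put uses the
// "incomplete" flag (incompleteFlag) the same way while a diff is being
// applied.  The flag name used here is hypothetical, and SetFlag/ClearFlag
// are assumed to be exposed through the embedded FlaggableStore interface.
func exampleFlagUsage(rw LayerStore, id string) error {
	if err := rw.SetFlag(id, "example-in-progress", true); err != nil {
		return err
	}
	// ... do some work against the layer ...
	return rw.ClearFlag(id, "example-in-progress")
}
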
func (r *layerStore) Status() ([][2]string, error) {
	return r.driver.Status(), nil
}

func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
	if !r.IsReadWrite() {
		return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
	}
	size = -1
	if err := os.MkdirAll(r.rundir, 0700); err != nil {
		return nil, -1, err
	}
	if err := os.MkdirAll(r.layerdir, 0700); err != nil {
		return nil, -1, err
	}
	if parent != "" {
		if parentLayer, ok := r.lookup(parent); ok {
			parent = parentLayer.ID
		}
	}
	if id == "" {
		id = stringid.GenerateRandomID()
		_, idInUse := r.byid[id]
		for idInUse {
			id = stringid.GenerateRandomID()
			_, idInUse = r.byid[id]
		}
	}
	if _, idInUse := r.byid[id]; idInUse {
		return nil, -1, ErrDuplicateID
	}
	names = dedupeNames(names)
	for _, name := range names {
		if _, nameInUse := r.byname[name]; nameInUse {
			return nil, -1, ErrDuplicateName
		}
	}
	opts := drivers.CreateOpts{
		MountLabel: mountLabel,
		StorageOpt: options,
	}
	if writeable {
		err = r.driver.CreateReadWrite(id, parent, &opts)
	} else {
		err = r.driver.Create(id, parent, &opts)
	}
	if err == nil {
		layer = &Layer{
			ID:         id,
			Parent:     parent,
			Names:      names,
			MountLabel: mountLabel,
			Created:    time.Now().UTC(),
			Flags:      make(map[string]interface{}),
		}
		r.layers = append(r.layers, layer)
		r.idindex.Add(id)
		r.byid[id] = layer
		for _, name := range names {
			r.byname[name] = layer
		}
		for flag, value := range flags {
			layer.Flags[flag] = value
		}
		if diff != nil {
			layer.Flags[incompleteFlag] = true
			err = r.Save()
			if err != nil {
				// We don't have a record of this layer, but at least
				// try to clean it up underneath us.
				r.driver.Remove(id)
				return nil, -1, err
			}
			size, err = r.ApplyDiff(layer.ID, diff)
			if err != nil {
				if r.Delete(layer.ID) != nil {
					// Either a driver error or an error saving.
					// We now have a layer that's been marked for
					// deletion but which we failed to remove.
				}
				return nil, -1, err
			}
			delete(layer.Flags, incompleteFlag)
		}
		err = r.Save()
		if err != nil {
			// We don't have a record of this layer, but at least
			// try to clean it up underneath us.
			r.driver.Remove(id)
			return nil, -1, err
		}
	}
	return layer, size, err
}

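// Illustrative sketch (not part of the original file): using Put to create a
// layer and populate it from a layer diff in one call.  The parent ID and the
// tarball path are hypothetical.
func examplePutWithDiff(rw LayerStore, parent string) (*Layer, int64, error) {
	diff, err := os.Open("/tmp/example-layer.tar.gz")
	if err != nil {
		return nil, -1, err
	}
	defer diff.Close()
	// Put marks the layer "incomplete" while the diff is applied, so a crash
	// midway leaves a record that Load can clean up later.
	return rw.Put("", parent, []string{"example-populated-layer"}, "", nil, true, nil, diff)
}
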
func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
	layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil)
	return layer, err
}

func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) {
	return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil)
}

func (r *layerStore) Mount(id, mountLabel string) (string, error) {
	if !r.IsReadWrite() {
		return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		return "", ErrLayerUnknown
	}
	if layer.MountCount > 0 {
		layer.MountCount++
		return layer.MountPoint, r.Save()
	}
	if mountLabel == "" {
		mountLabel = layer.MountLabel
	}
	mountpoint, err := r.driver.Get(id, mountLabel)
	if mountpoint != "" && err == nil {
		if layer.MountPoint != "" {
			delete(r.bymount, layer.MountPoint)
		}
		layer.MountPoint = filepath.Clean(mountpoint)
		layer.MountCount++
		r.bymount[layer.MountPoint] = layer
		err = r.Save()
	}
	return mountpoint, err
}

func (r *layerStore) Unmount(id string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		layerByMount, ok := r.bymount[filepath.Clean(id)]
		if !ok {
			return ErrLayerUnknown
		}
		layer = layerByMount
	}
	if layer.MountCount > 1 {
		layer.MountCount--
		return r.Save()
	}
	err := r.driver.Put(id)
	if err == nil || os.IsNotExist(err) {
		if layer.MountPoint != "" {
			delete(r.bymount, layer.MountPoint)
		}
		layer.MountCount--
		layer.MountPoint = ""
		err = r.Save()
	}
	return err
}

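// Illustrative sketch (not part of the original file): Mount and Unmount are
// reference counted, so nested mounts of the same layer only reach the driver
// once.  The layer ID passed in is assumed to exist.
func exampleNestedMount(rw LayerStore, id string) error {
	first, err := rw.Mount(id, "")
	if err != nil {
		return err
	}
	// A second Mount just bumps MountCount and returns the same mount point.
	second, err := rw.Mount(id, "")
	if err != nil {
		return err
	}
	_, _ = first, second
	// Two Unmount calls are needed before the driver actually unmounts.
	if err := rw.Unmount(id); err != nil {
		return err
	}
	return rw.Unmount(id)
}
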
func (r *layerStore) removeName(layer *Layer, name string) {
	layer.Names = stringSliceWithoutValue(layer.Names, name)
}

func (r *layerStore) SetNames(id string, names []string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath())
	}
	names = dedupeNames(names)
	if layer, ok := r.lookup(id); ok {
		for _, name := range layer.Names {
			delete(r.byname, name)
		}
		for _, name := range names {
			if otherLayer, ok := r.byname[name]; ok {
				r.removeName(otherLayer, name)
			}
			r.byname[name] = layer
		}
		layer.Names = names
		return r.Save()
	}
	return ErrLayerUnknown
}

func (r *layerStore) Metadata(id string) (string, error) {
	if layer, ok := r.lookup(id); ok {
		return layer.Metadata, nil
	}
	return "", ErrLayerUnknown
}

func (r *layerStore) SetMetadata(id, metadata string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath())
	}
	if layer, ok := r.lookup(id); ok {
		layer.Metadata = metadata
		return r.Save()
	}
	return ErrLayerUnknown
}

func (r *layerStore) tspath(id string) string {
	return filepath.Join(r.layerdir, id+tarSplitSuffix)
}

func (r *layerStore) Delete(id string) error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
	}
	layer, ok := r.lookup(id)
	if !ok {
		return ErrLayerUnknown
	}
	id = layer.ID
	for layer.MountCount > 0 {
		if err := r.Unmount(id); err != nil {
			return err
		}
	}
	err := r.driver.Remove(id)
	if err == nil {
		os.Remove(r.tspath(id))
		delete(r.byid, id)
		r.idindex.Delete(id)
		if layer.MountPoint != "" {
			delete(r.bymount, layer.MountPoint)
		}
		toDeleteIndex := -1
		for i, candidate := range r.layers {
			if candidate.ID == id {
				toDeleteIndex = i
				break
			}
		}
		if toDeleteIndex != -1 {
			// delete the layer at toDeleteIndex
			if toDeleteIndex == len(r.layers)-1 {
				r.layers = r.layers[:len(r.layers)-1]
			} else {
				r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...)
			}
		}
		if err = r.Save(); err != nil {
			return err
		}
	}
	return err
}

func (r *layerStore) Lookup(name string) (id string, err error) {
	if layer, ok := r.lookup(name); ok {
		return layer.ID, nil
	}
	return "", ErrLayerUnknown
}

func (r *layerStore) Exists(id string) bool {
	_, ok := r.lookup(id)
	return ok
}

func (r *layerStore) Get(id string) (*Layer, error) {
	if layer, ok := r.lookup(id); ok {
		return layer, nil
	}
	return nil, ErrLayerUnknown
}

func (r *layerStore) Wipe() error {
	if !r.IsReadWrite() {
		return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath())
	}
	ids := make([]string, 0, len(r.byid))
	for id := range r.byid {
		ids = append(ids, id)
	}
	for _, id := range ids {
		if err := r.Delete(id); err != nil {
			return err
		}
	}
	return nil
}

func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, toLayer *Layer, err error) {
	var ok bool
	var fromLayer *Layer
	toLayer, ok = r.lookup(to)
	if !ok {
		return "", "", nil, ErrLayerUnknown
	}
	to = toLayer.ID
	if from == "" {
		from = toLayer.Parent
	}
	if from != "" {
		fromLayer, ok = r.lookup(from)
		if ok {
			from = fromLayer.ID
		} else {
			fromLayer, ok = r.lookup(toLayer.Parent)
			if ok {
				from = fromLayer.ID
			}
		}
	}
	return from, to, toLayer, nil
}

func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
	from, to, toLayer, err := r.findParentAndLayer(from, to)
	if err != nil {
		return nil, ErrLayerUnknown
	}
	return r.driver.Changes(to, from, toLayer.MountLabel)
}

type simpleGetCloser struct {
	r    *layerStore
	path string
	id   string
}

func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
	return os.Open(filepath.Join(s.path, path))
}

func (s *simpleGetCloser) Close() error {
	return s.r.Unmount(s.id)
}

func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
	if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
		return getter.DiffGetter(id)
	}
	path, err := r.Mount(id, "")
	if err != nil {
		return nil, err
	}
	return &simpleGetCloser{
		r:    r,
		path: path,
		id:   id,
	}, nil
}

func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
	var metadata storage.Unpacker

	from, to, toLayer, err := r.findParentAndLayer(from, to)
	if err != nil {
		return nil, ErrLayerUnknown
	}
	// Default to applying the type of compression that we noted was used
	// for the layer diff when it was applied.
	compression := toLayer.CompressionType
	// If a particular compression type (or no compression) was selected,
	// use that instead.
	if options != nil && options.Compression != nil {
		compression = *options.Compression
	}
	maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) {
		// Depending on whether or not compression is desired, return either the
		// passed-in ReadCloser, or a new one that provides its readers with a
		// compressed version of the data that the original would have provided
		// to its readers.
		if compression == archive.Uncompressed {
			return rc, nil
		}
		preader, pwriter := io.Pipe()
		compressor, err := archive.CompressStream(pwriter, compression)
		if err != nil {
			rc.Close()
			pwriter.Close()
			preader.Close()
			return nil, err
		}
		go func() {
			defer pwriter.Close()
			defer compressor.Close()
			defer rc.Close()
			io.Copy(compressor, rc)
		}()
		return preader, nil
	}

	if from != toLayer.Parent {
		diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
		if err != nil {
			return nil, err
		}
		return maybeCompressReadCloser(diff)
	}

	tsfile, err := os.Open(r.tspath(to))
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
		if err != nil {
			return nil, err
		}
		return maybeCompressReadCloser(diff)
	}
	defer tsfile.Close()

	decompressor, err := gzip.NewReader(tsfile)
	if err != nil {
		return nil, err
	}
	defer decompressor.Close()

	tsbytes, err := ioutil.ReadAll(decompressor)
	if err != nil {
		return nil, err
	}

	metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes))

	fgetter, err := r.newFileGetter(to)
	if err != nil {
		return nil, err
	}

	tarstream := asm.NewOutputTarStream(fgetter, metadata)
	rc := ioutils.NewReadCloserWrapper(tarstream, func() error {
		err1 := tarstream.Close()
		err2 := fgetter.Close()
		if err2 == nil {
			return err1
		}
		return err2
	})
	return maybeCompressReadCloser(rc)
}

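// Illustrative sketch (not part of the original file): asking Diff for an
// uncompressed stream regardless of how the layer's diff was originally
// compressed, by overriding the compressor through DiffOptions.  Passing an
// empty "from" diffs against the layer's parent.
func exampleUncompressedDiff(ro ROLayerStore, id string) (io.ReadCloser, error) {
	uncompressed := archive.Uncompressed
	return ro.Diff("", id, &DiffOptions{Compression: &uncompressed})
}
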
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
	var toLayer *Layer
	from, to, toLayer, err = r.findParentAndLayer(from, to)
	if err != nil {
		return -1, ErrLayerUnknown
	}
	return r.driver.DiffSize(to, from, toLayer.MountLabel)
}

func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
	if !r.IsReadWrite() {
		return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath())
	}

	layer, ok := r.lookup(to)
	if !ok {
		return -1, ErrLayerUnknown
	}

	header := make([]byte, 10240)
	n, err := diff.Read(header)
	if err != nil && err != io.EOF {
		return -1, err
	}

	compression := archive.DetectCompression(header[:n])
	compressedDigest := digest.Canonical.Digester()
	compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash())
	defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter)

	tsdata := bytes.Buffer{}
	compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed)
	if err != nil {
		compressor = gzip.NewWriter(&tsdata)
	}
	metadata := storage.NewJSONPacker(compressor)
	uncompressed, err := archive.DecompressStream(defragmented)
	if err != nil {
		return -1, err
	}
	uncompressedDigest := digest.Canonical.Digester()
	uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash())
	payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter())
	if err != nil {
		return -1, err
	}
	size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, layer.MountLabel, payload)
	if err != nil {
		return -1, err
	}
	compressor.Close()
	if err == nil {
		if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil {
			return -1, err
		}
		if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil {
			return -1, err
		}
	}

	updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
		var newList []string
		if oldvalue != "" {
			for _, value := range (*m)[oldvalue] {
				if value != id {
					newList = append(newList, value)
				}
			}
			if len(newList) > 0 {
				(*m)[oldvalue] = newList
			} else {
				delete(*m, oldvalue)
			}
		}
		if newvalue != "" {
			(*m)[newvalue] = append((*m)[newvalue], id)
		}
	}
	updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID)
	layer.CompressedDigest = compressedDigest.Digest()
	layer.CompressedSize = compressedCounter.Count
	updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID)
	layer.UncompressedDigest = uncompressedDigest.Digest()
	layer.UncompressedSize = uncompressedCounter.Count
	layer.CompressionType = compression

	err = r.Save()

	return size, err
}

func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) {
	var layers []Layer
	for _, layerID := range m[d] {
		layer, ok := r.lookup(layerID)
		if !ok {
			return nil, ErrLayerUnknown
		}
		layers = append(layers, *layer)
	}
	return layers, nil
}

func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.bycompressedsum, d)
}

func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
	return r.layersByDigestMap(r.byuncompressedsum, d)
}

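// Illustrative sketch (not part of the original file): once ApplyDiff has
// recorded digests, a layer can be found again by the DiffID of its
// uncompressed content.  The digest value is supplied by the caller and is
// hypothetical here.
func exampleFindByDiffID(ro ROLayerStore, diffID digest.Digest) (*Layer, error) {
	layers, err := ro.LayersByUncompressedDigest(diffID)
	if err != nil {
		return nil, err
	}
	if len(layers) == 0 {
		return nil, ErrLayerUnknown
	}
	return &layers[0], nil
}
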
func (r *layerStore) Lock() {
	r.lockfile.Lock()
}

func (r *layerStore) Unlock() {
	r.lockfile.Unlock()
}

func (r *layerStore) Touch() error {
	return r.lockfile.Touch()
}

func (r *layerStore) Modified() (bool, error) {
	return r.lockfile.Modified()
}

func (r *layerStore) IsReadWrite() bool {
	return r.lockfile.IsReadWrite()
}

func (r *layerStore) TouchedSince(when time.Time) bool {
	return r.lockfile.TouchedSince(when)
}