dep ensure; updated vendor

parent 89400b7410
commit a6b971f8e6
@@ -163,9 +163,14 @@
     "builder/dockerfile/parser",
     "builder/dockerfile/shell",
     "client",
+    "pkg/archive",
+    "pkg/fileutils",
     "pkg/homedir",
     "pkg/idtools",
+    "pkg/ioutils",
+    "pkg/longpath",
     "pkg/mount",
+    "pkg/pools",
     "pkg/system"
   ]
   revision = "b1a1234c60cf87048814aa37da523b03a7b0d344"
@@ -487,6 +492,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "eadec1feacc8473e54622d5f3a25fbc9c7fb1f9bd38776475c3e2d283bd80d2a"
+  inputs-digest = "061d7ecaab8da6e0a543987bf870b813d2f8c88481c6cdffec33374150b66c98"
   solver-name = "gps-cdcl"
   solver-version = 1
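Gopkg.lock picks up five more github.com/docker/docker packages (pkg/archive, pkg/fileutils, pkg/ioutils, pkg/longpath, pkg/pools) at the already-pinned revision, and the solver recomputes the inputs-digest. A commit like this is normally reproduced with dep's standard workflow (a sketch; only the commit message is taken from this commit):

$ dep ensure
$ git add Gopkg.lock vendor/
$ git commit -m "dep ensure; updated vendor"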
File diff suppressed because it is too large
@@ -0,0 +1,92 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	if format == OverlayWhiteoutFormat {
		return overlayWhiteoutConverter{}
	}
	return nil
}

type overlayWhiteoutConverter struct{}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return nil, err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			if hdr.Xattrs != nil {
				delete(hdr.Xattrs, "trusted.overlay.opaque")
			}

			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return
}

func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
		// don't write the file itself
		return false, err
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
			return false, err
		}
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
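This first vendored file is the Linux whiteout converter: on write it turns an overlayfs whiteout (a 0:0 character device) into an AUFS-style `.wh.`-prefixed regular file, and on read it does the reverse, recreating the character device and translating opaque-directory markers into the trusted.overlay.opaque xattr. A minimal standalone sketch of the rename step (the local whiteoutPrefix constant mirrors the package's WhiteoutPrefix, which is defined in a file outside this diff, so treat it as an assumption):

package main

import (
	"fmt"
	"path/filepath"
)

// whiteoutPrefix mirrors pkg/archive's WhiteoutPrefix; assumed here because
// the real constant lives in a file not shown in this diff.
const whiteoutPrefix = ".wh."

// aufsWhiteoutName performs the same rename as ConvertWrite above: a deleted
// entry becomes a zero-length regular file with a prefixed basename.
func aufsWhiteoutName(name string) string {
	dir, file := filepath.Split(name)
	return filepath.Join(dir, whiteoutPrefix+file)
}

func main() {
	fmt.Println(aufsWhiteoutName("etc/passwd")) // etc/.wh.passwd
}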
@@ -0,0 +1,7 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
@@ -0,0 +1,114 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
	"golang.org/x/sys/unix"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath converts the platform-specific relative path p
// into the canonical POSIX-style path used for tar archival.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		// Currently go does not fill in the major/minors
		if s.Mode&unix.S_IFBLK != 0 ||
			s.Mode&unix.S_IFCHR != 0 {
			hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
			hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
		}
	}

	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		inode = s.Ino
	}

	return
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	if rsystem.RunningInUserNS() {
		// cannot create a device if running in user namespace
		return nil
	}

	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= unix.S_IFBLK
	case tar.TypeChar:
		mode |= unix.S_IFCHR
	case tar.TypeFifo:
		mode |= unix.S_IFIFO
	}

	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
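The Unix archive helpers mostly shuttle stat fields into tar headers; the interesting bit is the device-number handling, which splits st_rdev into the major/minor pair the tar header stores and recombines them for mknod(2) on extraction. A quick round-trip sketch with golang.org/x/sys/unix (Linux-only; the 8:1 device number is just an example value):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Combine major/minor the way Mknod expects, then split them back the
	// way setHeaderForSpecialDevice fills tar headers.
	rdev := unix.Mkdev(8, 1) // 8:1 is conventionally /dev/sda1
	major, minor := unix.Major(rdev), unix.Minor(rdev)
	fmt.Println(major, minor)                     // 8 1
	fmt.Println(unix.Mkdev(major, minor) == rdev) // true
}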
@@ -0,0 +1,77 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath converts the platform-specific relative path p
// into the canonical POSIX-style path used for tar archival.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
	permPart := perm & os.ModePerm
	noPermPart := perm &^ os.ModePerm
	// Add the x bit: make everything +x from windows
	permPart |= 0111
	permPart &= 0755

	return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	// do nothing. no notion of Rdev, Nlink in stat on Windows
	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Inode in stat on Windows
	return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	// no notion of file ownership mapping yet on Windows
	return idtools.IDPair{UID: 0, GID: 0}, nil
}
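On Windows the only real permission logic is chmodTarEntry: it keeps the non-permission mode bits, forces the execute bit, and caps the permission bits at 0755. A standalone check of that bit arithmetic (the function body is copied from the diff above):

package main

import (
	"fmt"
	"os"
)

// chmodTarEntry reproduces the Windows permission adjustment shown above.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm    // rwx bits only
	noPermPart := perm &^ os.ModePerm // type bits (dir, symlink, ...)
	permPart |= 0111                  // force +x
	permPart &= 0755                  // drop group/other write
	return noPermPart | permPart
}

func main() {
	fmt.Printf("%o\n", chmodTarEntry(0644))       // 755
	fmt.Printf("%o\n", chmodTarEntry(0666))       // 755
	fmt.Println(chmodTarEntry(os.ModeDir | 0700)) // drwx--x--x
}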
@@ -0,0 +1,441 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

func (c ChangeType) String() string {
	switch c {
	case ChangeModify:
		return "C"
	case ChangeAdd:
		return "A"
	case ChangeDelete:
		return "D"
	}
	return ""
}

// Change represents a change. It wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be modify, add, or delete.
// This is used for layer diff.
type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// GNU tar and the Go tar writer don't have sub-second mtime precision,
// which is problematic when we apply changes via tar files. We handle
// this by comparing for exact times, *or* the same second count when
// either a or b has exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers.
func Changes(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}

func aufsMetadataSkip(path string) (skip bool, err error) {
	skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
	if err != nil {
		skip = true
	}
	return
}

func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	f := filepath.Base(path)

	// If there is a whiteout, then the file was removed
	if strings.HasPrefix(f, WhiteoutPrefix) {
		originalFile := f[len(WhiteoutPrefix):]
		return filepath.Join(filepath.Dir(path), originalFile), nil
	}

	return "", nil
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)

func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		if sc != nil {
			if skip, err := sc(path); skip {
				return err
			}
		}

		change := Change{
			Path: path,
		}

		deletedFile, err := dc(rw, path, f)
		if err != nil {
			return err
		}

		// Find out what kind of modification happened
		if deletedFile != "" {
			change.Path = deletedFile
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo
	name       string
	stat       *system.StatT
	children   map[string]*FileInfo
	capability []byte
	added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	parent := info
	if path == string(os.PathSeparator) {
		return info
	}

	pathElements := strings.Split(path, string(os.PathSeparator))
	for _, elem := range pathElements {
		if elem != "" {
			child := parent.children[elem]
			if child == nil {
				return nil
			}
			parent = child
		}
	}
	return parent
}

func (info *FileInfo) path() string {
	if info.parent == nil {
		// As this runs on the daemon side, file paths are OS specific.
		return string(os.PathSeparator)
	}
	return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				!bytes.Equal(oldChild.capability, newChild.capability) {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}
}

// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}

func newRootFileInfo() *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	root := &FileInfo{
		name:     string(os.PathSeparator),
		children: make(map[string]*FileInfo),
	}
	return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
	var (
		size int64
		sf   = make(map[uint64]struct{})
	)
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, err := os.Lstat(file)
			if err != nil {
				logrus.Errorf("Can not stat %q: %s", file, err)
				continue
			}

			if fileInfo != nil && !fileInfo.IsDir() {
				if hasHardlinks(fileInfo) {
					inode := getIno(fileInfo)
					if _, ok := sf[inode]; !ok {
						size += fileInfo.Size()
						sf[inode] = struct{}{}
					}
				} else {
					size += fileInfo.Size()
				}
			}
		}
	}
	return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
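changes.go is the portable core of layer diffing: Changes handles AUFS-style read-write layers, ChangesDirs compares two arbitrary trees, and ExportChanges streams the result back out as a tar with whiteout entries for deletions. A minimal sketch of calling the public entry point from outside the package (the two directory paths are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compare an upper layer against a base layer; prints one "K /path"
	// line per change, where K is C (modify), A (add) or D (delete).
	changes, err := archive.ChangesDirs("/tmp/new-layer", "/tmp/old-layer")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Println(c.String())
	}
}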
@@ -0,0 +1,313 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"syscall"
	"unsafe"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string
	dir2  string
	root1 *FileInfo
	root2 *FileInfo
}

// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generate a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
	w := &walker{
		dir1:  dir1,
		dir2:  dir2,
		root1: newRootFileInfo(),
		root2: newRootFileInfo(),
	}

	i1, err := os.Lstat(w.dir1)
	if err != nil {
		return nil, nil, err
	}
	i2, err := os.Lstat(w.dir2)
	if err != nil {
		return nil, nil, err
	}

	if err := w.walk("/", i1, i2); err != nil {
		return nil, nil, err
	}

	return w.root1, w.root2, nil
}

// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
	if fi == nil {
		return nil
	}
	parent := root.LookUp(filepath.Dir(path))
	if parent == nil {
		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
	}
	info := &FileInfo{
		name:     filepath.Base(path),
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string
	ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}

// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}

func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}

// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers.
func OverlayChanges(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, overlayDeletedFile, nil)
}

func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
	if fi.Mode()&os.ModeCharDevice != 0 {
		s := fi.Sys().(*syscall.Stat_t)
		if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
			return path, nil
		}
	}
	if fi.Mode()&os.ModeDir != 0 {
		opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
		if err != nil {
			return "", err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			return path, nil
		}
	}

	return "", nil
}
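The Linux walker's central trick is the sorted two-pointer merge in walk: names present on only one side are always recursed into, while a name present on both sides is pruned when its inode matches and both trees sit on the same device. The merge in isolation, over plain {name, inode} pairs (sample data invented for illustration):

package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

// mergeChanged keeps names that exist on one side only, or whose inodes
// differ -- the same pruning rule walker.walk applies above.
func mergeChanged(lower, upper []nameIno, sameDevice bool) []string {
	var out []string
	i, j := 0, 0
	for i < len(lower) && j < len(upper) {
		switch {
		case lower[i].name < upper[j].name:
			out = append(out, lower[i].name)
			i++
		case lower[i].name > upper[j].name:
			out = append(out, upper[j].name)
			j++
		default: // same name on both sides
			if lower[i].ino != upper[j].ino || !sameDevice {
				out = append(out, lower[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(lower); i++ {
		out = append(out, lower[i].name)
	}
	for ; j < len(upper); j++ {
		out = append(out, upper[j].name)
	}
	return out
}

func main() {
	lower := []nameIno{{"a", 1}, {"b", 2}, {"c", 3}}
	upper := []nameIno{{"b", 2}, {"c", 9}, {"d", 4}}
	fmt.Println(mergeChanged(lower, upper, true)) // [a c d]
}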
@@ -0,0 +1,97 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
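The portable fallback gives up on inode pruning and simply collects both trees concurrently, joining on a buffered error channel. The synchronization pattern in isolation (doWork is a made-up placeholder standing in for collectFileInfo):

package main

import (
	"errors"
	"fmt"
)

// doWork is a hypothetical stand-in for collectFileInfo.
func doWork(dir string) (string, error) {
	if dir == "" {
		return "", errors.New("empty input")
	}
	return "tree for " + dir, nil
}

func main() {
	var oldTree, newTree string
	var err1, err2 error
	errs := make(chan error, 2) // buffered: neither goroutine can block on send

	go func() { oldTree, err1 = doWork("old"); errs <- err1 }()
	go func() { newTree, err2 = doWork("new"); errs <- err2 }()

	// Receiving twice joins both goroutines and surfaces the first error;
	// returning early is safe because the channel is buffered.
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			fmt.Println("error:", err)
			return
		}
	}
	fmt.Println(oldTree, "/", newTree)
}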
@@ -0,0 +1,37 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size for dirs, it's not a good measure of change
		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return fi.Sys().(*syscall.Stat_t).Ino
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
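statDifferent leans on sameFsTimeSpec (from changes.go above) to tolerate timestamps that lost their sub-second part in a tar round trip: two mtimes compare equal when the seconds match and either side has zero nanoseconds. A quick illustration with hand-picked values:

package main

import (
	"fmt"
	"syscall"
)

// sameFsTimeSpec is reproduced from changes.go above for a standalone demo.
func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

func main() {
	onDisk := syscall.Timespec{Sec: 1700000000, Nsec: 123456789}
	fromTar := syscall.Timespec{Sec: 1700000000, Nsec: 0} // precision lost in tar

	fmt.Println(sameFsTimeSpec(onDisk, fromTar))                           // true: treated as unchanged
	fmt.Println(sameFsTimeSpec(onDisk, syscall.Timespec{Sec: 1700000001})) // false
}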
@@ -0,0 +1,30 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, it's not a good measure of change
	if oldStat.Mtim() != newStat.Mtim() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
|  | @ -0,0 +1,472 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"archive/tar" | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"os" | ||||||
|  | 	"path/filepath" | ||||||
|  | 	"strings" | ||||||
|  | 
 | ||||||
|  | 	"github.com/docker/docker/pkg/system" | ||||||
|  | 	"github.com/sirupsen/logrus" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Errors used or returned by this file.
 | ||||||
|  | var ( | ||||||
|  | 	ErrNotDirectory      = errors.New("not a directory") | ||||||
|  | 	ErrDirNotExists      = errors.New("no such directory") | ||||||
|  | 	ErrCannotCopyDir     = errors.New("cannot copy directory") | ||||||
|  | 	ErrInvalidCopySource = errors.New("invalid copy source content") | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // PreserveTrailingDotOrSeparator returns the given cleaned path (after
 | ||||||
|  | // processing using any utility functions from the path or filepath stdlib
 | ||||||
|  | // packages) and appends a trailing `/.` or `/` if its corresponding  original
 | ||||||
|  | // path (from before being processed by utility functions from the path or
 | ||||||
|  | // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
 | ||||||
|  | // path already ends in a `.` path segment, then another is not added. If the
 | ||||||
|  | // clean path already ends in the separator, then another is not added.
 | ||||||
|  | func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { | ||||||
|  | 	// Ensure paths are in platform semantics
 | ||||||
|  | 	cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) | ||||||
|  | 	originalPath = strings.Replace(originalPath, "/", string(sep), -1) | ||||||
|  | 
 | ||||||
|  | 	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { | ||||||
|  | 		if !hasTrailingPathSeparator(cleanedPath, sep) { | ||||||
|  | 			// Add a separator if it doesn't already end with one (a cleaned
 | ||||||
|  | 			// path would only end in a separator if it is the root).
 | ||||||
|  | 			cleanedPath += string(sep) | ||||||
|  | 		} | ||||||
|  | 		cleanedPath += "." | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { | ||||||
|  | 		cleanedPath += string(sep) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return cleanedPath | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // assertsDirectory returns whether the given path is
 | ||||||
|  | // asserted to be a directory, i.e., the path ends with
 | ||||||
|  | // a trailing '/' or `/.`, assuming a path separator of `/`.
 | ||||||
|  | func assertsDirectory(path string, sep byte) bool { | ||||||
|  | 	return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // hasTrailingPathSeparator returns whether the given
 | ||||||
|  | // path ends with the system's path separator character.
 | ||||||
|  | func hasTrailingPathSeparator(path string, sep byte) bool { | ||||||
|  | 	return len(path) > 0 && path[len(path)-1] == sep | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // specifiesCurrentDir returns whether the given path specifies
 | ||||||
|  | // a "current directory", i.e., the last path segment is `.`.
 | ||||||
|  | func specifiesCurrentDir(path string) bool { | ||||||
|  | 	return filepath.Base(path) == "." | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SplitPathDirEntry splits the given path between its directory name and its
 | ||||||
|  | // basename by first cleaning the path but preserves a trailing "." if the
 | ||||||
|  | // original path specified the current directory.
 | ||||||
|  | func SplitPathDirEntry(path string) (dir, base string) { | ||||||
|  | 	cleanedPath := filepath.Clean(filepath.FromSlash(path)) | ||||||
|  | 
 | ||||||
|  | 	if specifiesCurrentDir(path) { | ||||||
|  | 		cleanedPath += string(os.PathSeparator) + "." | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TarResource archives the resource described by the given CopyInfo to a Tar
 | ||||||
|  | // archive. A non-nil error is returned if sourcePath does not exist or is
 | ||||||
|  | // asserted to be a directory but exists as another type of file.
 | ||||||
|  | //
 | ||||||
|  | // This function acts as a convenient wrapper around TarWithOptions, which
 | ||||||
|  | // requires a directory as the source path. TarResource accepts either a
 | ||||||
|  | // directory or a file path and correctly sets the Tar options.
 | ||||||
|  | func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { | ||||||
|  | 	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TarResourceRebase is like TarResource but renames the first path element of
 | ||||||
|  | // items in the resulting tar archive to match the given rebaseName if not "".
 | ||||||
|  | func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { | ||||||
|  | 	sourcePath = normalizePath(sourcePath) | ||||||
|  | 	if _, err = os.Lstat(sourcePath); err != nil { | ||||||
|  | 		// Catches the case where the source does not exist, or is asserted to
 | ||||||
|  | 		// be a directory but exists as another file type, since that also
 | ||||||
|  | 		// causes an error.
 | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Separate the source path between its directory and
 | ||||||
|  | 	// the entry in that directory which we are archiving.
 | ||||||
|  | 	sourceDir, sourceBase := SplitPathDirEntry(sourcePath) | ||||||
|  | 	opts := TarResourceRebaseOpts(sourceBase, rebaseName) | ||||||
|  | 
 | ||||||
|  | 	logrus.Debugf("copying %q from %q", sourceBase, sourceDir) | ||||||
|  | 	return TarWithOptions(sourceDir, opts) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // TarResourceRebaseOpts does not perform the tar itself; it just builds the rebase
 | ||||||
|  | // parameters (the TarOptions struct) to be passed to TarWithOptions.
 | ||||||
|  | func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { | ||||||
|  | 	filter := []string{sourceBase} | ||||||
|  | 	return &TarOptions{ | ||||||
|  | 		Compression:      Uncompressed, | ||||||
|  | 		IncludeFiles:     filter, | ||||||
|  | 		IncludeSourceDir: true, | ||||||
|  | 		RebaseNames: map[string]string{ | ||||||
|  | 			sourceBase: rebaseName, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyInfo holds basic info about the source
 | ||||||
|  | // or destination path of a copy operation.
 | ||||||
|  | type CopyInfo struct { | ||||||
|  | 	Path       string | ||||||
|  | 	Exists     bool | ||||||
|  | 	IsDir      bool | ||||||
|  | 	RebaseName string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyInfoSourcePath stats the given path to create a CopyInfo
 | ||||||
|  | // struct representing that resource for the source of an archive copy
 | ||||||
|  | // operation. The given path should be an absolute local path. A source path
 | ||||||
|  | // has all symlinks evaluated that appear before the last path separator ("/"
 | ||||||
|  | // on Unix). As it is to be a copy source, the path must exist.
 | ||||||
|  | func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { | ||||||
|  | 	// Normalize the file path and then evaluate the symbolic link.
 | ||||||
|  | 	// We will use the link target instead of the symbolic link itself if
 | ||||||
|  | 	// followLink is set.
 | ||||||
|  | 	path = normalizePath(path) | ||||||
|  | 
 | ||||||
|  | 	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return CopyInfo{}, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	stat, err := os.Lstat(resolvedPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return CopyInfo{}, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return CopyInfo{ | ||||||
|  | 		Path:       resolvedPath, | ||||||
|  | 		Exists:     true, | ||||||
|  | 		IsDir:      stat.IsDir(), | ||||||
|  | 		RebaseName: rebaseName, | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CopyInfoDestinationPath stats the given path to create a CopyInfo
 | ||||||
|  | // struct representing that resource for the destination of an archive copy
 | ||||||
|  | // operation. The given path should be an absolute local path.
 | ||||||
|  | func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { | ||||||
|  | 	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
 | ||||||
|  | 	path = normalizePath(path) | ||||||
|  | 	originalPath := path | ||||||
|  | 
 | ||||||
|  | 	stat, err := os.Lstat(path) | ||||||
|  | 
 | ||||||
|  | 	if err == nil && stat.Mode()&os.ModeSymlink == 0 { | ||||||
|  | 		// The path exists and is not a symlink.
 | ||||||
|  | 		return CopyInfo{ | ||||||
|  | 			Path:   path, | ||||||
|  | 			Exists: true, | ||||||
|  | 			IsDir:  stat.IsDir(), | ||||||
|  | 		}, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// While the path is a symlink.
 | ||||||
|  | 	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { | ||||||
|  | 		if n > maxSymlinkIter { | ||||||
|  | 			// Don't follow symlinks more than this arbitrary number of times.
 | ||||||
|  | 			return CopyInfo{}, errors.New("too many symlinks in " + originalPath) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// The path is a symbolic link. We need to evaluate it so that the
 | ||||||
|  | 		// destination of the copy operation is the link target and not the
 | ||||||
|  | 		// link itself. This is notably different than CopyInfoSourcePath which
 | ||||||
|  | 		// only evaluates symlinks before the last appearing path separator.
 | ||||||
|  | 		// Also note that it is okay if the last path element is a broken
 | ||||||
|  | 		// symlink as the copy operation should create the target.
 | ||||||
|  | 		var linkTarget string | ||||||
|  | 
 | ||||||
|  | 		linkTarget, err = os.Readlink(path) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return CopyInfo{}, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if !system.IsAbs(linkTarget) { | ||||||
|  | 			// Join with the parent directory.
 | ||||||
|  | 			dstParent, _ := SplitPathDirEntry(path) | ||||||
|  | 			linkTarget = filepath.Join(dstParent, linkTarget) | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		path = linkTarget | ||||||
|  | 		stat, err = os.Lstat(path) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if err != nil { | ||||||
|  | 		// It's okay if the destination path doesn't exist. We can still
 | ||||||
|  | 		// continue the copy operation if the parent directory exists.
 | ||||||
|  | 		if !os.IsNotExist(err) { | ||||||
|  | 			return CopyInfo{}, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Ensure destination parent dir exists.
 | ||||||
|  | 		dstParent, _ := SplitPathDirEntry(path) | ||||||
|  | 
 | ||||||
|  | 		parentDirStat, err := os.Stat(dstParent) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return CopyInfo{}, err | ||||||
|  | 		} | ||||||
|  | 		if !parentDirStat.IsDir() { | ||||||
|  | 			return CopyInfo{}, ErrNotDirectory | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		return CopyInfo{Path: path}, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// The path exists after resolving symlinks.
 | ||||||
|  | 	return CopyInfo{ | ||||||
|  | 		Path:   path, | ||||||
|  | 		Exists: true, | ||||||
|  | 		IsDir:  stat.IsDir(), | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // PrepareArchiveCopy prepares the given srcContent archive, which should
 | ||||||
|  | // contain the archived resource described by srcInfo, to the destination
 | ||||||
|  | // described by dstInfo. Returns the possibly modified content archive along
 | ||||||
|  | // with the path to the destination directory which it should be extracted to.
 | ||||||
|  | func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { | ||||||
|  | 	// Ensure in platform semantics
 | ||||||
|  | 	srcInfo.Path = normalizePath(srcInfo.Path) | ||||||
|  | 	dstInfo.Path = normalizePath(dstInfo.Path) | ||||||
|  | 
 | ||||||
|  | 	// Separate the destination path between its directory and base
 | ||||||
|  | 	// components in case the source archive contents need to be rebased.
 | ||||||
|  | 	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) | ||||||
|  | 	_, srcBase := SplitPathDirEntry(srcInfo.Path) | ||||||
|  | 
 | ||||||
|  | 	switch { | ||||||
|  | 	case dstInfo.Exists && dstInfo.IsDir: | ||||||
|  | 		// The destination exists as a directory. No alteration
 | ||||||
|  | 		// to srcContent is needed as its contents can be
 | ||||||
|  | 		// simply extracted to the destination directory.
 | ||||||
|  | 		return dstInfo.Path, ioutil.NopCloser(srcContent), nil | ||||||
|  | 	case dstInfo.Exists && srcInfo.IsDir: | ||||||
|  | 		// The destination exists as some type of file and the source
 | ||||||
|  | 		// content is a directory. This is an error condition since
 | ||||||
|  | 		// you cannot copy a directory to an existing file location.
 | ||||||
|  | 		return "", nil, ErrCannotCopyDir | ||||||
|  | 	case dstInfo.Exists: | ||||||
|  | 		// The destination exists as some type of file and the source content
 | ||||||
|  | 		// is also a file. The source content entry will have to be renamed to
 | ||||||
|  | 		// have a basename which matches the destination path's basename.
 | ||||||
|  | 		if len(srcInfo.RebaseName) != 0 { | ||||||
|  | 			srcBase = srcInfo.RebaseName | ||||||
|  | 		} | ||||||
|  | 		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil | ||||||
|  | 	case srcInfo.IsDir: | ||||||
|  | 		// The destination does not exist and the source content is an archive
 | ||||||
|  | 		// of a directory. The archive should be extracted to the parent of
 | ||||||
|  | 		// the destination path instead, and when it is, the directory that is
 | ||||||
|  | 		// created as a result should take the name of the destination path.
 | ||||||
|  | 		// The source content entries will have to be renamed to have a
 | ||||||
|  | 		// basename which matches the destination path's basename.
 | ||||||
|  | 		if len(srcInfo.RebaseName) != 0 { | ||||||
|  | 			srcBase = srcInfo.RebaseName | ||||||
|  | 		} | ||||||
|  | 		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil | ||||||
|  | 	case assertsDirectory(dstInfo.Path, os.PathSeparator): | ||||||
|  | 		// The destination does not exist and is asserted to be created as a
 | ||||||
|  | 		// directory, but the source content is not a directory. This is an
 | ||||||
|  | 		// error condition since you cannot create a directory from a file
 | ||||||
|  | 		// source.
 | ||||||
|  | 		return "", nil, ErrDirNotExists | ||||||
|  | 	default: | ||||||
|  | 		// The last remaining case is when the destination does not exist, is
 | ||||||
|  | 		// not asserted to be a directory, and the source content is not an
 | ||||||
|  | 		// archive of a directory. In this case, the destination file will need
 | ||||||
|  | 		// to be created when the archive is extracted and the source content
 | ||||||
|  | 		// entry will have to be renamed to have a basename which matches the
 | ||||||
|  | 		// destination path's basename.
 | ||||||
|  | 		if len(srcInfo.RebaseName) != 0 { | ||||||
|  | 			srcBase = srcInfo.RebaseName | ||||||
|  | 		} | ||||||
|  | 		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RebaseArchiveEntries rewrites the given srcContent archive replacing
 | ||||||
|  | // the first occurrence of oldBase with newBase in entry names.
 | ||||||
|  | func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { | ||||||
|  | 	if oldBase == string(os.PathSeparator) { | ||||||
|  | 		// If oldBase specifies the root directory, use an empty string as
 | ||||||
|  | 		// oldBase instead so that newBase doesn't replace the path separator
 | ||||||
|  | 		// that all paths will start with.
 | ||||||
|  | 		oldBase = "" | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	rebased, w := io.Pipe() | ||||||
|  | 
 | ||||||
|  | 	go func() { | ||||||
|  | 		srcTar := tar.NewReader(srcContent) | ||||||
|  | 		rebasedTar := tar.NewWriter(w) | ||||||
|  | 
 | ||||||
|  | 		for { | ||||||
|  | 			hdr, err := srcTar.Next() | ||||||
|  | 			if err == io.EOF { | ||||||
|  | 				// Signals end of archive.
 | ||||||
|  | 				rebasedTar.Close() | ||||||
|  | 				w.Close() | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 			if err != nil { | ||||||
|  | 				w.CloseWithError(err) | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) | ||||||
|  | 			if hdr.Typeflag == tar.TypeLink { | ||||||
|  | 				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if err = rebasedTar.WriteHeader(hdr); err != nil { | ||||||
|  | 				w.CloseWithError(err) | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if _, err = io.Copy(rebasedTar, srcTar); err != nil { | ||||||
|  | 				w.CloseWithError(err) | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}() | ||||||
|  | 
 | ||||||
|  | 	return rebased | ||||||
|  | } | ||||||
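|  | 
 | ||||||
|  | // Illustrative sketch (not part of the vendored file), using Generate from
 | ||||||
|  | // this package to build a small in-memory archive and rename its first path
 | ||||||
|  | // element:
 | ||||||
|  | //
 | ||||||
|  | //	content, _ := Generate("src/a.txt", "hello") // entry "src/a.txt"
 | ||||||
|  | //	rebased := RebaseArchiveEntries(content, "src", "dst")
 | ||||||
|  | //	// reading rebased now yields the entry "dst/a.txt"
 | ||||||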
|  | 
 | ||||||
|  | // TODO @gupta-ak. These might have to be changed in the future to be
 | ||||||
|  | // continuity driver aware as well to support LCOW.
 | ||||||
|  | 
 | ||||||
|  | // CopyResource performs an archive copy from the given source path to the
 | ||||||
|  | // given destination path. The source path MUST exist and the destination
 | ||||||
|  | // path's parent directory must exist.
 | ||||||
|  | func CopyResource(srcPath, dstPath string, followLink bool) error { | ||||||
|  | 	var ( | ||||||
|  | 		srcInfo CopyInfo | ||||||
|  | 		err     error | ||||||
|  | 	) | ||||||
|  | 
 | ||||||
|  | 	// Ensure in platform semantics
 | ||||||
|  | 	srcPath = normalizePath(srcPath) | ||||||
|  | 	dstPath = normalizePath(dstPath) | ||||||
|  | 
 | ||||||
|  | 	// Clean the source and destination paths.
 | ||||||
|  | 	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) | ||||||
|  | 	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) | ||||||
|  | 
 | ||||||
|  | 	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	content, err := TarResource(srcInfo) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	defer content.Close() | ||||||
|  | 
 | ||||||
|  | 	return CopyTo(content, srcInfo, dstPath) | ||||||
|  | } | ||||||
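|  | 
 | ||||||
|  | // Illustrative usage (not part of the vendored file), a minimal sketch with
 | ||||||
|  | // hypothetical paths: copy a file into an existing directory, following
 | ||||||
|  | // symlinks in the source path. The trailing "/" asserts that the destination
 | ||||||
|  | // is a directory, so the file keeps its basename under it:
 | ||||||
|  | //
 | ||||||
|  | //	if err := CopyResource("/tmp/data/config.json", "/srv/app/", true); err != nil {
 | ||||||
|  | //		log.Fatal(err)
 | ||||||
|  | //	}
 | ||||||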
|  | 
 | ||||||
|  | // CopyTo handles extracting the given content whose
 | ||||||
|  | // entries should be sourced from srcInfo to dstPath.
 | ||||||
|  | func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { | ||||||
|  | 	// The destination path need not exist, but CopyInfoDestinationPath will
 | ||||||
|  | 	// ensure that at least the parent directory exists.
 | ||||||
|  | 	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	defer copyArchive.Close() | ||||||
|  | 
 | ||||||
|  | 	options := &TarOptions{ | ||||||
|  | 		NoLchown:             true, | ||||||
|  | 		NoOverwriteDirNonDir: true, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return Untar(copyArchive, dstDir, options) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ResolveHostSourcePath decides which real path needs to be copied, depending on
 | ||||||
|  | // whether symbolic links should be followed. If followLink is true, resolvedPath
 | ||||||
|  | // is the link target of any symlinked file; otherwise only symlinks in the
 | ||||||
|  | // parent directory are resolved, and a symlinked file itself is returned unresolved.
 | ||||||
|  | func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { | ||||||
|  | 	if followLink { | ||||||
|  | 		resolvedPath, err = filepath.EvalSymlinks(path) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) | ||||||
|  | 	} else { | ||||||
|  | 		dirPath, basePath := filepath.Split(path) | ||||||
|  | 
 | ||||||
|  | 		// if not following the symbolic link, then resolve the symlink of the parent dir
 | ||||||
|  | 		var resolvedDirPath string | ||||||
|  | 		resolvedDirPath, err = filepath.EvalSymlinks(dirPath) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		// resolvedDirPath will have been cleaned (no trailing path separators) so
 | ||||||
|  | 		// we can manually join it with the base path element.
 | ||||||
|  | 		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath | ||||||
|  | 		if hasTrailingPathSeparator(path, os.PathSeparator) && | ||||||
|  | 			filepath.Base(path) != filepath.Base(resolvedPath) { | ||||||
|  | 			rebaseName = filepath.Base(path) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return resolvedPath, rebaseName, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // GetRebaseName normalizes and compares path and resolvedPath,
 | ||||||
|  | // returning the completed resolved path and the rebased file name.
 | ||||||
|  | func GetRebaseName(path, resolvedPath string) (string, string) { | ||||||
|  | 	// resolvedPath will have been cleaned (no trailing path separator or dot), so
 | ||||||
|  | 	// we can manually re-append them to match the original path
 | ||||||
|  | 	var rebaseName string | ||||||
|  | 	if specifiesCurrentDir(path) && | ||||||
|  | 		!specifiesCurrentDir(resolvedPath) { | ||||||
|  | 		resolvedPath += string(filepath.Separator) + "." | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if hasTrailingPathSeparator(path, os.PathSeparator) && | ||||||
|  | 		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { | ||||||
|  | 		resolvedPath += string(filepath.Separator) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if filepath.Base(path) != filepath.Base(resolvedPath) { | ||||||
|  | 		// In the case where the path had a trailing separator and a symlink
 | ||||||
|  | 		// evaluation has changed the last path component, we will need to
 | ||||||
|  | 		// rebase the name in the archive that is being copied to match the
 | ||||||
|  | 		// originally requested name.
 | ||||||
|  | 		rebaseName = filepath.Base(path) | ||||||
|  | 	} | ||||||
|  | 	return resolvedPath, rebaseName | ||||||
|  | } | ||||||
|  | @ -0,0 +1,11 @@ | ||||||
|  | // +build !windows
 | ||||||
|  | 
 | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"path/filepath" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func normalizePath(path string) string { | ||||||
|  | 	return filepath.ToSlash(path) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,9 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"path/filepath" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func normalizePath(path string) string { | ||||||
|  | 	return filepath.FromSlash(path) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,256 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"archive/tar" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"os" | ||||||
|  | 	"path/filepath" | ||||||
|  | 	"runtime" | ||||||
|  | 	"strings" | ||||||
|  | 
 | ||||||
|  | 	"github.com/docker/docker/pkg/idtools" | ||||||
|  | 	"github.com/docker/docker/pkg/pools" | ||||||
|  | 	"github.com/docker/docker/pkg/system" | ||||||
|  | 	"github.com/sirupsen/logrus" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
 | ||||||
|  | // compressed or uncompressed.
 | ||||||
|  | // Returns the size in bytes of the contents of the layer.
 | ||||||
|  | func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { | ||||||
|  | 	tr := tar.NewReader(layer) | ||||||
|  | 	trBuf := pools.BufioReader32KPool.Get(tr) | ||||||
|  | 	defer pools.BufioReader32KPool.Put(trBuf) | ||||||
|  | 
 | ||||||
|  | 	var dirs []*tar.Header | ||||||
|  | 	unpackedPaths := make(map[string]struct{}) | ||||||
|  | 
 | ||||||
|  | 	if options == nil { | ||||||
|  | 		options = &TarOptions{} | ||||||
|  | 	} | ||||||
|  | 	if options.ExcludePatterns == nil { | ||||||
|  | 		options.ExcludePatterns = []string{} | ||||||
|  | 	} | ||||||
|  | 	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) | ||||||
|  | 
 | ||||||
|  | 	aufsTempdir := "" | ||||||
|  | 	aufsHardlinks := make(map[string]*tar.Header) | ||||||
|  | 
 | ||||||
|  | 	// Iterate through the files in the archive.
 | ||||||
|  | 	for { | ||||||
|  | 		hdr, err := tr.Next() | ||||||
|  | 		if err == io.EOF { | ||||||
|  | 			// end of tar archive
 | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 		if err != nil { | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		size += hdr.Size | ||||||
|  | 
 | ||||||
|  | 		// Normalize name, for safety and for a simple is-root check
 | ||||||
|  | 		hdr.Name = filepath.Clean(hdr.Name) | ||||||
|  | 
 | ||||||
|  | 		// Windows does not support filenames with colons in them. Ignore
 | ||||||
|  | 		// these files. This is not a problem though (although it might
 | ||||||
|  | 		// appear that it is). Let's suppose a client is running docker pull.
 | ||||||
|  | 		// The daemon it points to is Windows. Would it make sense for the
 | ||||||
|  | 		// client to be doing a docker pull Ubuntu for example (which has files
 | ||||||
|  | 		// with colons in the name under /usr/share/man/man3)? No, absolutely
 | ||||||
|  | 		// not as it would really only make sense that they were pulling a
 | ||||||
|  | 		// Windows image. However, for development, it is necessary to be able
 | ||||||
|  | 		// to pull Linux images which are in the repository.
 | ||||||
|  | 		//
 | ||||||
|  | 		// TODO Windows. Once the registry is aware of what images are Windows-
 | ||||||
|  | 		// specific or Linux-specific, this warning should be changed to an error
 | ||||||
|  | 		// to cater for the situation where someone does manage to upload a Linux
 | ||||||
|  | 		// image but have it tagged as Windows inadvertently.
 | ||||||
|  | 		if runtime.GOOS == "windows" { | ||||||
|  | 			if strings.Contains(hdr.Name, ":") { | ||||||
|  | 				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Note: these operations are platform specific, so the slash must be too.
 | ||||||
|  | 		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { | ||||||
|  | 			// Not the root directory, ensure that the parent directory exists.
 | ||||||
|  | 			// This happened in some tests where an image had a tarfile without any
 | ||||||
|  | 			// parent directories.
 | ||||||
|  | 			parent := filepath.Dir(hdr.Name) | ||||||
|  | 			parentPath := filepath.Join(dest, parent) | ||||||
|  | 
 | ||||||
|  | 			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { | ||||||
|  | 				err = system.MkdirAll(parentPath, 0600, "") | ||||||
|  | 				if err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Skip AUFS metadata dirs
 | ||||||
|  | 		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { | ||||||
|  | 			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
 | ||||||
|  | 			// We don't want this directory, but we need the files in it so that
 | ||||||
|  | 			// such hardlinks can be resolved.
 | ||||||
|  | 			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { | ||||||
|  | 				basename := filepath.Base(hdr.Name) | ||||||
|  | 				aufsHardlinks[basename] = hdr | ||||||
|  | 				if aufsTempdir == "" { | ||||||
|  | 					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { | ||||||
|  | 						return 0, err | ||||||
|  | 					} | ||||||
|  | 					defer os.RemoveAll(aufsTempdir) | ||||||
|  | 				} | ||||||
|  | 				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if hdr.Name != WhiteoutOpaqueDir { | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		path := filepath.Join(dest, hdr.Name) | ||||||
|  | 		rel, err := filepath.Rel(dest, path) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// Note: these operations are platform specific, so the slash must be too.
 | ||||||
|  | 		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { | ||||||
|  | 			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) | ||||||
|  | 		} | ||||||
|  | 		base := filepath.Base(path) | ||||||
|  | 
 | ||||||
|  | 		if strings.HasPrefix(base, WhiteoutPrefix) { | ||||||
|  | 			dir := filepath.Dir(path) | ||||||
|  | 			if base == WhiteoutOpaqueDir { | ||||||
|  | 				_, err := os.Lstat(dir) | ||||||
|  | 				if err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { | ||||||
|  | 					if err != nil { | ||||||
|  | 						if os.IsNotExist(err) { | ||||||
|  | 							err = nil // parent was deleted
 | ||||||
|  | 						} | ||||||
|  | 						return err | ||||||
|  | 					} | ||||||
|  | 					if path == dir { | ||||||
|  | 						return nil | ||||||
|  | 					} | ||||||
|  | 					if _, exists := unpackedPaths[path]; !exists { | ||||||
|  | 						err := os.RemoveAll(path) | ||||||
|  | 						return err | ||||||
|  | 					} | ||||||
|  | 					return nil | ||||||
|  | 				}) | ||||||
|  | 				if err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 			} else { | ||||||
|  | 				originalBase := base[len(WhiteoutPrefix):] | ||||||
|  | 				originalPath := filepath.Join(dir, originalBase) | ||||||
|  | 				if err := os.RemoveAll(originalPath); err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			// If path exists we almost always just want to remove and replace it.
 | ||||||
|  | 			// The only exception is when it is a directory *and* the file from
 | ||||||
|  | 			// the layer is also a directory. Then we want to merge them (i.e.
 | ||||||
|  | 			// just apply the metadata from the layer).
 | ||||||
|  | 			if fi, err := os.Lstat(path); err == nil { | ||||||
|  | 				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { | ||||||
|  | 					if err := os.RemoveAll(path); err != nil { | ||||||
|  | 						return 0, err | ||||||
|  | 					} | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			trBuf.Reset(tr) | ||||||
|  | 			srcData := io.Reader(trBuf) | ||||||
|  | 			srcHdr := hdr | ||||||
|  | 
 | ||||||
|  | 			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
 | ||||||
|  | 			// we manually retarget these into the temporary files we extracted them into
 | ||||||
|  | 			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { | ||||||
|  | 				linkBasename := filepath.Base(hdr.Linkname) | ||||||
|  | 				srcHdr = aufsHardlinks[linkBasename] | ||||||
|  | 				if srcHdr == nil { | ||||||
|  | 					return 0, fmt.Errorf("Invalid aufs hardlink") | ||||||
|  | 				} | ||||||
|  | 				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) | ||||||
|  | 				if err != nil { | ||||||
|  | 					return 0, err | ||||||
|  | 				} | ||||||
|  | 				defer tmpFile.Close() | ||||||
|  | 				srcData = tmpFile | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if err := remapIDs(idMappings, srcHdr); err != nil { | ||||||
|  | 				return 0, err | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { | ||||||
|  | 				return 0, err | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			// Directory mtimes must be handled at the end to avoid further
 | ||||||
|  | 			// file creation in them to modify the directory mtime
 | ||||||
|  | 			if hdr.Typeflag == tar.TypeDir { | ||||||
|  | 				dirs = append(dirs, hdr) | ||||||
|  | 			} | ||||||
|  | 			unpackedPaths[path] = struct{}{} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for _, hdr := range dirs { | ||||||
|  | 		path := filepath.Join(dest, hdr.Name) | ||||||
|  | 		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return size, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ApplyLayer parses a diff in the standard layer format from `layer`,
 | ||||||
|  | // and applies it to the directory `dest`. The stream `layer` can be
 | ||||||
|  | // compressed or uncompressed.
 | ||||||
|  | // Returns the size in bytes of the contents of the layer.
 | ||||||
|  | func ApplyLayer(dest string, layer io.Reader) (int64, error) { | ||||||
|  | 	return applyLayerHandler(dest, layer, &TarOptions{}, true) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ApplyUncompressedLayer parses a diff in the standard layer format from
 | ||||||
|  | // `layer`, and applies it to the directory `dest`. The stream `layer`
 | ||||||
|  | // can only be uncompressed.
 | ||||||
|  | // Returns the size in bytes of the contents of the layer.
 | ||||||
|  | func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { | ||||||
|  | 	return applyLayerHandler(dest, layer, options, false) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // applyLayerHandler does the bulk of the work for ApplyLayer, optionally skipping DecompressStream
 | ||||||
|  | func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { | ||||||
|  | 	dest = filepath.Clean(dest) | ||||||
|  | 
 | ||||||
|  | 	// We need to be able to set any perms
 | ||||||
|  | 	oldmask, err := system.Umask(0) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return 0, err | ||||||
|  | 	} | ||||||
|  | 	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
 | ||||||
|  | 
 | ||||||
|  | 	if decompress { | ||||||
|  | 		layer, err = DecompressStream(layer) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return UnpackLayer(dest, layer, options) | ||||||
|  | } | ||||||
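|  | 
 | ||||||
|  | // Illustrative usage (not part of the vendored file), a minimal sketch with a
 | ||||||
|  | // hypothetical layer file: apply a (possibly compressed) layer diff onto an
 | ||||||
|  | // unpacked root filesystem:
 | ||||||
|  | //
 | ||||||
|  | //	f, err := os.Open("/var/lib/example/layer.tar.gz") // hypothetical path
 | ||||||
|  | //	if err != nil {
 | ||||||
|  | //		log.Fatal(err)
 | ||||||
|  | //	}
 | ||||||
|  | //	defer f.Close()
 | ||||||
|  | //	size, err := ApplyLayer("/var/lib/example/rootfs", f)
 | ||||||
|  | //	if err != nil {
 | ||||||
|  | //		log.Fatal(err)
 | ||||||
|  | //	}
 | ||||||
|  | //	log.Printf("applied %d bytes", size)
 | ||||||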
|  | @ -0,0 +1,97 @@ | ||||||
|  | // +build ignore
 | ||||||
|  | 
 | ||||||
|  | // Simple tool to create an archive stream from an old and new directory
 | ||||||
|  | //
 | ||||||
|  | // By default it will stream the comparison of two temporary directories with junk files
 | ||||||
|  | package main | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"flag" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"os" | ||||||
|  | 	"path" | ||||||
|  | 
 | ||||||
|  | 	"github.com/docker/docker/pkg/archive" | ||||||
|  | 	"github.com/sirupsen/logrus" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	flDebug  = flag.Bool("D", false, "debugging output") | ||||||
|  | 	flNewDir = flag.String("newdir", "", "") | ||||||
|  | 	flOldDir = flag.String("olddir", "", "") | ||||||
|  | 	log      = logrus.New() | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func main() { | ||||||
|  | 	flag.Usage = func() { | ||||||
|  | 		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") | ||||||
|  | 		fmt.Printf("%s [OPTIONS]\n", os.Args[0]) | ||||||
|  | 		flag.PrintDefaults() | ||||||
|  | 	} | ||||||
|  | 	flag.Parse() | ||||||
|  | 	log.Out = os.Stderr | ||||||
|  | 	if (len(os.Getenv("DEBUG")) > 0) || *flDebug { | ||||||
|  | 		logrus.SetLevel(logrus.DebugLevel) | ||||||
|  | 	} | ||||||
|  | 	var newDir, oldDir string | ||||||
|  | 
 | ||||||
|  | 	if len(*flNewDir) == 0 { | ||||||
|  | 		var err error | ||||||
|  | 		newDir, err = ioutil.TempDir("", "docker-test-newDir") | ||||||
|  | 		if err != nil { | ||||||
|  | 			log.Fatal(err) | ||||||
|  | 		} | ||||||
|  | 		defer os.RemoveAll(newDir) | ||||||
|  | 		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { | ||||||
|  | 			log.Fatal(err) | ||||||
|  | 		} | ||||||
|  | 	} else { | ||||||
|  | 		newDir = *flNewDir | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if len(*flOldDir) == 0 { | ||||||
|  | 		var err error | ||||||
|  | 		// assign to the outer oldDir; ":=" would shadow it and leave it empty
 | ||||||
|  | 		oldDir, err = ioutil.TempDir("", "docker-test-oldDir") | ||||||
|  | 		if err != nil { | ||||||
|  | 			log.Fatal(err) | ||||||
|  | 		} | ||||||
|  | 		defer os.RemoveAll(oldDir) | ||||||
|  | 	} else { | ||||||
|  | 		oldDir = *flOldDir | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	changes, err := archive.ChangesDirs(newDir, oldDir) | ||||||
|  | 	if err != nil { | ||||||
|  | 		log.Fatal(err) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	a, err := archive.ExportChanges(newDir, changes) | ||||||
|  | 	if err != nil { | ||||||
|  | 		log.Fatal(err) | ||||||
|  | 	} | ||||||
|  | 	defer a.Close() | ||||||
|  | 
 | ||||||
|  | 	i, err := io.Copy(os.Stdout, a) | ||||||
|  | 	if err != nil && err != io.EOF { | ||||||
|  | 		log.Fatal(err) | ||||||
|  | 	} | ||||||
|  | 	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { | ||||||
|  | 	fileData := []byte("fooo") | ||||||
|  | 	for n := 0; n < numberOfFiles; n++ { | ||||||
|  | 		fileName := fmt.Sprintf("file-%d", n) | ||||||
|  | 		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 		if makeLinks { | ||||||
|  | 			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { | ||||||
|  | 				return 0, err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	totalSize := numberOfFiles * len(fileData) | ||||||
|  | 	return totalSize, nil | ||||||
|  | } | ||||||
|  | @ -0,0 +1,16 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"syscall" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func timeToTimespec(time time.Time) (ts syscall.Timespec) { | ||||||
|  | 	if time.IsZero() { | ||||||
|  | 		// Return UTIME_OMIT special value
 | ||||||
|  | 		ts.Sec = 0 | ||||||
|  | 		ts.Nsec = ((1 << 30) - 2) | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 	return syscall.NsecToTimespec(time.UnixNano()) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,16 @@ | ||||||
|  | // +build !linux
 | ||||||
|  | 
 | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"syscall" | ||||||
|  | 	"time" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | func timeToTimespec(time time.Time) (ts syscall.Timespec) { | ||||||
|  | 	nsec := int64(0) | ||||||
|  | 	if !time.IsZero() { | ||||||
|  | 		nsec = time.UnixNano() | ||||||
|  | 	} | ||||||
|  | 	return syscall.NsecToTimespec(nsec) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,23 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | // Whiteouts are files with a special meaning for the layered filesystem.
 | ||||||
|  | // Docker uses AUFS whiteout files inside exported archives. In other
 | ||||||
|  | // filesystems these files are generated/handled on tar creation/extraction.
 | ||||||
|  | 
 | ||||||
|  | // WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
 | ||||||
|  | // filename this means that file has been removed from the base layer.
 | ||||||
|  | const WhiteoutPrefix = ".wh." | ||||||
|  | 
 | ||||||
|  | // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
 | ||||||
|  | // for removing an actual file. Normally these files are excluded from exported
 | ||||||
|  | // archives.
 | ||||||
|  | const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix | ||||||
|  | 
 | ||||||
|  | // WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
 | ||||||
|  | // layers. Normally these should not go into exported archives and all changed
 | ||||||
|  | // hardlinks should be copied to the top layer.
 | ||||||
|  | const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" | ||||||
|  | 
 | ||||||
|  | // WhiteoutOpaqueDir file means directory has been made opaque - meaning
 | ||||||
|  | // readdir calls to this directory do not follow to lower layers.
 | ||||||
|  | const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" | ||||||
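|  | 
 | ||||||
|  | // Illustrative sketch (not part of the vendored file): deleting /etc/passwd
 | ||||||
|  | // in a layer is recorded as the empty entry "etc/.wh.passwd", while marking
 | ||||||
|  | // the directory /data opaque is recorded as the entry "data/.wh..wh..opq".
 | ||||||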
|  | @ -0,0 +1,59 @@ | ||||||
|  | package archive // import "github.com/docker/docker/pkg/archive"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"archive/tar" | ||||||
|  | 	"bytes" | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Generate generates a new archive from the content provided
 | ||||||
|  | // as input.
 | ||||||
|  | //
 | ||||||
|  | // `files` is a sequence of path/content pairs. A new file is
 | ||||||
|  | // added to the archive for each pair.
 | ||||||
|  | // If the last pair is incomplete, the file is created with an
 | ||||||
|  | // empty content. For example:
 | ||||||
|  | //
 | ||||||
|  | // Generate("foo.txt", "hello world", "emptyfile")
 | ||||||
|  | //
 | ||||||
|  | // The above call will return an archive with 2 files:
 | ||||||
|  | //  * ./foo.txt with content "hello world"
 | ||||||
|  | //  * ./emptyfile with empty content
 | ||||||
|  | //
 | ||||||
|  | // FIXME: stream content instead of buffering
 | ||||||
|  | // FIXME: specify permissions and other archive metadata
 | ||||||
|  | func Generate(input ...string) (io.Reader, error) { | ||||||
|  | 	files := parseStringPairs(input...) | ||||||
|  | 	buf := new(bytes.Buffer) | ||||||
|  | 	tw := tar.NewWriter(buf) | ||||||
|  | 	for _, file := range files { | ||||||
|  | 		name, content := file[0], file[1] | ||||||
|  | 		hdr := &tar.Header{ | ||||||
|  | 			Name: name, | ||||||
|  | 			Size: int64(len(content)), | ||||||
|  | 		} | ||||||
|  | 		if err := tw.WriteHeader(hdr); err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 		if _, err := tw.Write([]byte(content)); err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if err := tw.Close(); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return buf, nil | ||||||
|  | } | ||||||
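|  | 
 | ||||||
|  | // Illustrative usage (not part of the vendored file), a minimal sketch:
 | ||||||
|  | //
 | ||||||
|  | //	rdr, err := Generate("foo.txt", "hello world", "emptyfile")
 | ||||||
|  | //	if err != nil {
 | ||||||
|  | //		log.Fatal(err)
 | ||||||
|  | //	}
 | ||||||
|  | //	tr := tar.NewReader(rdr)
 | ||||||
|  | //	for {
 | ||||||
|  | //		hdr, err := tr.Next()
 | ||||||
|  | //		if err == io.EOF {
 | ||||||
|  | //			break
 | ||||||
|  | //		}
 | ||||||
|  | //		if err != nil {
 | ||||||
|  | //			log.Fatal(err)
 | ||||||
|  | //		}
 | ||||||
|  | //		fmt.Println(hdr.Name, hdr.Size) // "foo.txt 11", then "emptyfile 0"
 | ||||||
|  | //	}
 | ||||||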
|  | 
 | ||||||
|  | func parseStringPairs(input ...string) (output [][2]string) { | ||||||
|  | 	output = make([][2]string, 0, len(input)/2+1) | ||||||
|  | 	for i := 0; i < len(input); i += 2 { | ||||||
|  | 		var pair [2]string | ||||||
|  | 		pair[0] = input[i] | ||||||
|  | 		if i+1 < len(input) { | ||||||
|  | 			pair[1] = input[i+1] | ||||||
|  | 		} | ||||||
|  | 		output = append(output, pair) | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | @ -0,0 +1,298 @@ | ||||||
|  | package fileutils // import "github.com/docker/docker/pkg/fileutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"fmt" | ||||||
|  | 	"io" | ||||||
|  | 	"os" | ||||||
|  | 	"path/filepath" | ||||||
|  | 	"regexp" | ||||||
|  | 	"strings" | ||||||
|  | 	"text/scanner" | ||||||
|  | 
 | ||||||
|  | 	"github.com/sirupsen/logrus" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // PatternMatcher allows checking paths against a list of patterns
 | ||||||
|  | type PatternMatcher struct { | ||||||
|  | 	patterns   []*Pattern | ||||||
|  | 	exclusions bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewPatternMatcher creates a new matcher object for specific patterns that can
 | ||||||
|  | // be used later to match paths against the patterns
 | ||||||
|  | func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { | ||||||
|  | 	pm := &PatternMatcher{ | ||||||
|  | 		patterns: make([]*Pattern, 0, len(patterns)), | ||||||
|  | 	} | ||||||
|  | 	for _, p := range patterns { | ||||||
|  | 		// Eliminate leading and trailing whitespace.
 | ||||||
|  | 		p = strings.TrimSpace(p) | ||||||
|  | 		if p == "" { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		p = filepath.Clean(p) | ||||||
|  | 		newp := &Pattern{} | ||||||
|  | 		if p[0] == '!' { | ||||||
|  | 			if len(p) == 1 { | ||||||
|  | 				return nil, errors.New("illegal exclusion pattern: \"!\"") | ||||||
|  | 			} | ||||||
|  | 			newp.exclusion = true | ||||||
|  | 			p = p[1:] | ||||||
|  | 			pm.exclusions = true | ||||||
|  | 		} | ||||||
|  | 		// Do some syntax checking on the pattern.
 | ||||||
|  | 		// filepath's Match() has some really weird rules that are inconsistent
 | ||||||
|  | 		// so instead of trying to dup their logic, just call Match() for its
 | ||||||
|  | 		// error state and if there is an error in the pattern return it.
 | ||||||
|  | 		// If this becomes an issue we can remove this since it's really only
 | ||||||
|  | 		// needed in the error (syntax) case - which isn't really critical.
 | ||||||
|  | 		if _, err := filepath.Match(p, "."); err != nil { | ||||||
|  | 			return nil, err | ||||||
|  | 		} | ||||||
|  | 		newp.cleanedPattern = p | ||||||
|  | 		newp.dirs = strings.Split(p, string(os.PathSeparator)) | ||||||
|  | 		pm.patterns = append(pm.patterns, newp) | ||||||
|  | 	} | ||||||
|  | 	return pm, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Matches matches path against all the patterns. Matches is not safe to be
 | ||||||
|  | // called concurrently
 | ||||||
|  | func (pm *PatternMatcher) Matches(file string) (bool, error) { | ||||||
|  | 	matched := false | ||||||
|  | 	file = filepath.FromSlash(file) | ||||||
|  | 	parentPath := filepath.Dir(file) | ||||||
|  | 	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) | ||||||
|  | 
 | ||||||
|  | 	for _, pattern := range pm.patterns { | ||||||
|  | 		negative := false | ||||||
|  | 
 | ||||||
|  | 		if pattern.exclusion { | ||||||
|  | 			negative = true | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		match, err := pattern.match(file) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return false, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if !match && parentPath != "." { | ||||||
|  | 			// Check to see if the pattern matches one of our parent dirs.
 | ||||||
|  | 			if len(pattern.dirs) <= len(parentPathDirs) { | ||||||
|  | 				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if match { | ||||||
|  | 			matched = !negative | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if matched { | ||||||
|  | 		logrus.Debugf("Skipping excluded path: %s", file) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return matched, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Exclusions returns true if any of the patterns define exclusions
 | ||||||
|  | func (pm *PatternMatcher) Exclusions() bool { | ||||||
|  | 	return pm.exclusions | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Patterns returns array of active patterns
 | ||||||
|  | func (pm *PatternMatcher) Patterns() []*Pattern { | ||||||
|  | 	return pm.patterns | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Pattern defines a single regexp used to filter file paths.
 | ||||||
|  | type Pattern struct { | ||||||
|  | 	cleanedPattern string | ||||||
|  | 	dirs           []string | ||||||
|  | 	regexp         *regexp.Regexp | ||||||
|  | 	exclusion      bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *Pattern) String() string { | ||||||
|  | 	return p.cleanedPattern | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Exclusion returns true if this pattern defines exclusion
 | ||||||
|  | func (p *Pattern) Exclusion() bool { | ||||||
|  | 	return p.exclusion | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *Pattern) match(path string) (bool, error) { | ||||||
|  | 
 | ||||||
|  | 	if p.regexp == nil { | ||||||
|  | 		if err := p.compile(); err != nil { | ||||||
|  | 			return false, filepath.ErrBadPattern | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	b := p.regexp.MatchString(path) | ||||||
|  | 
 | ||||||
|  | 	return b, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (p *Pattern) compile() error { | ||||||
|  | 	regStr := "^" | ||||||
|  | 	pattern := p.cleanedPattern | ||||||
|  | 	// Go through the pattern and convert it to a regexp.
 | ||||||
|  | 	// We use a scanner so we can support utf-8 chars.
 | ||||||
|  | 	var scan scanner.Scanner | ||||||
|  | 	scan.Init(strings.NewReader(pattern)) | ||||||
|  | 
 | ||||||
|  | 	sl := string(os.PathSeparator) | ||||||
|  | 	escSL := sl | ||||||
|  | 	if sl == `\` { | ||||||
|  | 		escSL += `\` | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for scan.Peek() != scanner.EOF { | ||||||
|  | 		ch := scan.Next() | ||||||
|  | 
 | ||||||
|  | 		if ch == '*' { | ||||||
|  | 			if scan.Peek() == '*' { | ||||||
|  | 				// is some flavor of "**"
 | ||||||
|  | 				scan.Next() | ||||||
|  | 
 | ||||||
|  | 				// Treat **/ as ** so eat the "/"
 | ||||||
|  | 				if string(scan.Peek()) == sl { | ||||||
|  | 					scan.Next() | ||||||
|  | 				} | ||||||
|  | 
 | ||||||
|  | 				if scan.Peek() == scanner.EOF { | ||||||
|  | 					// is "**EOF" - to align with .gitignore just accept all
 | ||||||
|  | 					regStr += ".*" | ||||||
|  | 				} else { | ||||||
|  | 					// is "**"
 | ||||||
|  | 					// Note that this allows for any # of /'s (even 0) because
 | ||||||
|  | 					// the .* will eat everything, even /'s
 | ||||||
|  | 					regStr += "(.*" + escSL + ")?" | ||||||
|  | 				} | ||||||
|  | 			} else { | ||||||
|  | 				// is "*" so map it to anything but "/"
 | ||||||
|  | 				regStr += "[^" + escSL + "]*" | ||||||
|  | 			} | ||||||
|  | 		} else if ch == '?' { | ||||||
|  | 			// "?" is any char except "/"
 | ||||||
|  | 			regStr += "[^" + escSL + "]" | ||||||
|  | 		} else if ch == '.' || ch == '$' { | ||||||
|  | 			// Escape some regexp special chars that have no meaning
 | ||||||
|  | 			// in golang's filepath.Match
 | ||||||
|  | 			regStr += `\` + string(ch) | ||||||
|  | 		} else if ch == '\\' { | ||||||
|  | 			// escape next char. Note that a trailing \ in the pattern
 | ||||||
|  | 			// will be left alone (but need to escape it)
 | ||||||
|  | 			if sl == `\` { | ||||||
|  | 				// On windows map "\" to "\\", meaning an escaped backslash,
 | ||||||
|  | 				// and then just continue because filepath.Match on
 | ||||||
|  | 				// Windows doesn't allow escaping at all
 | ||||||
|  | 				regStr += escSL | ||||||
|  | 				continue | ||||||
|  | 			} | ||||||
|  | 			if scan.Peek() != scanner.EOF { | ||||||
|  | 				regStr += `\` + string(scan.Next()) | ||||||
|  | 			} else { | ||||||
|  | 				regStr += `\` | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			regStr += string(ch) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	regStr += "$" | ||||||
|  | 
 | ||||||
|  | 	re, err := regexp.Compile(regStr) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	p.regexp = re | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
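|  | 
 | ||||||
|  | // Illustrative sketch (not part of the vendored file): with "/" as the
 | ||||||
|  | // separator, the translation above maps, for example:
 | ||||||
|  | //
 | ||||||
|  | //	"*.go"         -> ^[^/]*\.go$
 | ||||||
|  | //	"a?c"          -> ^a[^/]c$
 | ||||||
|  | //	"docs/**/*.md" -> ^docs/(.*/)?[^/]*\.md$
 | ||||||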
|  | 
 | ||||||
|  | // Matches returns true if file matches any of the patterns
 | ||||||
|  | // and isn't excluded by any of the subsequent patterns.
 | ||||||
|  | func Matches(file string, patterns []string) (bool, error) { | ||||||
|  | 	pm, err := NewPatternMatcher(patterns) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return false, err | ||||||
|  | 	} | ||||||
|  | 	file = filepath.Clean(file) | ||||||
|  | 
 | ||||||
|  | 	if file == "." { | ||||||
|  | 		// Don't let them exclude everything, kind of silly.
 | ||||||
|  | 		return false, nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return pm.Matches(file) | ||||||
|  | } | ||||||
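|  | 
 | ||||||
|  | // Illustrative usage (not part of the vendored file), a minimal sketch with
 | ||||||
|  | // .dockerignore-style patterns:
 | ||||||
|  | //
 | ||||||
|  | //	skip, err := Matches("vendor/pkg/a.go", []string{"vendor/**", "!vendor/keep.go"})
 | ||||||
|  | //	// skip == true: the file matches "vendor/**" and no exclusion re-includes it
 | ||||||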
|  | 
 | ||||||
|  | // CopyFile copies from src to dst until either EOF is reached
 | ||||||
|  | // on src or an error occurs. It verifies src exists and removes
 | ||||||
|  | // the dst if it exists.
 | ||||||
|  | func CopyFile(src, dst string) (int64, error) { | ||||||
|  | 	cleanSrc := filepath.Clean(src) | ||||||
|  | 	cleanDst := filepath.Clean(dst) | ||||||
|  | 	if cleanSrc == cleanDst { | ||||||
|  | 		return 0, nil | ||||||
|  | 	} | ||||||
|  | 	sf, err := os.Open(cleanSrc) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return 0, err | ||||||
|  | 	} | ||||||
|  | 	defer sf.Close() | ||||||
|  | 	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { | ||||||
|  | 		return 0, err | ||||||
|  | 	} | ||||||
|  | 	df, err := os.Create(cleanDst) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return 0, err | ||||||
|  | 	} | ||||||
|  | 	defer df.Close() | ||||||
|  | 	return io.Copy(df, sf) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ReadSymlinkedDirectory returns the target directory of a symlink.
 | ||||||
|  | // An error is returned if the target of the symbolic link is a file.
 | ||||||
|  | func ReadSymlinkedDirectory(path string) (string, error) { | ||||||
|  | 	var realPath string | ||||||
|  | 	var err error | ||||||
|  | 	if realPath, err = filepath.Abs(path); err != nil { | ||||||
|  | 		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) | ||||||
|  | 	} | ||||||
|  | 	if realPath, err = filepath.EvalSymlinks(realPath); err != nil { | ||||||
|  | 		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) | ||||||
|  | 	} | ||||||
|  | 	realPathInfo, err := os.Stat(realPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) | ||||||
|  | 	} | ||||||
|  | 	if !realPathInfo.Mode().IsDir() { | ||||||
|  | 		return "", fmt.Errorf("canonical path points to a file '%s'", realPath) | ||||||
|  | 	} | ||||||
|  | 	return realPath, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CreateIfNotExists creates a file or a directory only if it does not already exist.
 | ||||||
|  | func CreateIfNotExists(path string, isDir bool) error { | ||||||
|  | 	if _, err := os.Stat(path); err != nil { | ||||||
|  | 		if os.IsNotExist(err) { | ||||||
|  | 			if isDir { | ||||||
|  | 				return os.MkdirAll(path, 0755) | ||||||
|  | 			} | ||||||
|  | 			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			f, err := os.OpenFile(path, os.O_CREATE, 0755) | ||||||
|  | 			if err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 			f.Close() | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | @ -0,0 +1,27 @@ | ||||||
|  | package fileutils // import "github.com/docker/docker/pkg/fileutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"os" | ||||||
|  | 	"os/exec" | ||||||
|  | 	"strconv" | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // GetTotalUsedFds returns the number of used file descriptors by
 | ||||||
|  | // executing `lsof -p PID`
 | ||||||
|  | func GetTotalUsedFds() int { | ||||||
|  | 	pid := os.Getpid() | ||||||
|  | 
 | ||||||
|  | 	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) | ||||||
|  | 
 | ||||||
|  | 	output, err := cmd.CombinedOutput() | ||||||
|  | 	if err != nil { | ||||||
|  | 		return -1 | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	outputStr := strings.TrimSpace(string(output)) | ||||||
|  | 
 | ||||||
|  | 	fds := strings.Split(outputStr, "\n") | ||||||
|  | 
 | ||||||
|  | 	return len(fds) - 1 | ||||||
|  | } | ||||||
|  | @ -0,0 +1,22 @@ | ||||||
|  | // +build linux freebsd
 | ||||||
|  | 
 | ||||||
|  | package fileutils // import "github.com/docker/docker/pkg/fileutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"fmt" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"os" | ||||||
|  | 
 | ||||||
|  | 	"github.com/sirupsen/logrus" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // GetTotalUsedFds returns the number of used file descriptors by
 | ||||||
|  | // reading them via the /proc filesystem.
 | ||||||
|  | func GetTotalUsedFds() int { | ||||||
|  | 	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { | ||||||
|  | 		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) | ||||||
|  | 	} else { | ||||||
|  | 		return len(fds) | ||||||
|  | 	} | ||||||
|  | 	return -1 | ||||||
|  | } | ||||||
|  | @ -0,0 +1,7 @@ | ||||||
|  | package fileutils // import "github.com/docker/docker/pkg/fileutils"
 | ||||||
|  | 
 | ||||||
|  | // GetTotalUsedFds returns the number of used file descriptors. Not supported
 | ||||||
|  | // on Windows.
 | ||||||
|  | func GetTotalUsedFds() int { | ||||||
|  | 	return -1 | ||||||
|  | } | ||||||
|  | @ -0,0 +1,51 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | var errBufferFull = errors.New("buffer is full") | ||||||
|  | 
 | ||||||
|  | type fixedBuffer struct { | ||||||
|  | 	buf      []byte | ||||||
|  | 	pos      int | ||||||
|  | 	lastRead int | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) Write(p []byte) (int, error) { | ||||||
|  | 	n := copy(b.buf[b.pos:cap(b.buf)], p) | ||||||
|  | 	b.pos += n | ||||||
|  | 
 | ||||||
|  | 	if n < len(p) { | ||||||
|  | 		if b.pos == cap(b.buf) { | ||||||
|  | 			return n, errBufferFull | ||||||
|  | 		} | ||||||
|  | 		return n, io.ErrShortWrite | ||||||
|  | 	} | ||||||
|  | 	return n, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) Read(p []byte) (int, error) { | ||||||
|  | 	n := copy(p, b.buf[b.lastRead:b.pos]) | ||||||
|  | 	b.lastRead += n | ||||||
|  | 	return n, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) Len() int { | ||||||
|  | 	return b.pos - b.lastRead | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) Cap() int { | ||||||
|  | 	return cap(b.buf) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) Reset() { | ||||||
|  | 	b.pos = 0 | ||||||
|  | 	b.lastRead = 0 | ||||||
|  | 	b.buf = b.buf[:0] | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *fixedBuffer) String() string { | ||||||
|  | 	return string(b.buf[b.lastRead:b.pos]) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,186 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"errors" | ||||||
|  | 	"io" | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // maxCap is the highest capacity to use in byte slices that buffer data.
 | ||||||
|  | const maxCap = 1e6 | ||||||
|  | 
 | ||||||
|  | // minCap is the lowest capacity to use in byte slices that buffer data
 | ||||||
|  | const minCap = 64 | ||||||
|  | 
 | ||||||
|  | // blockThreshold is the minimum number of bytes in the buffer which will cause
 | ||||||
|  | // a write to BytesPipe to block when allocating a new slice.
 | ||||||
|  | const blockThreshold = 1e6 | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// ErrClosed is returned when Write is called on a closed BytesPipe.
 | ||||||
|  | 	ErrClosed = errors.New("write to closed BytesPipe") | ||||||
|  | 
 | ||||||
|  | 	bufPools     = make(map[int]*sync.Pool) | ||||||
|  | 	bufPoolsLock sync.Mutex | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
 | ||||||
|  | // All written data may be read at most once. Also, BytesPipe allocates
 | ||||||
|  | // and releases new byte slices to adjust to current needs, so the buffer
 | ||||||
|  | // won't be overgrown after peak loads.
 | ||||||
|  | type BytesPipe struct { | ||||||
|  | 	mu       sync.Mutex | ||||||
|  | 	wait     *sync.Cond | ||||||
|  | 	buf      []*fixedBuffer | ||||||
|  | 	bufLen   int | ||||||
|  | 	closeErr error // error to return from next Read. set to nil if not closed.
 | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBytesPipe creates a new BytesPipe, initialized with a single empty
 | ||||||
|  | // internal buffer of 64-byte capacity. Internal buffers grow (up to maxCap)
 | ||||||
|  | // and are released as data is written and read.
 | ||||||
|  | func NewBytesPipe() *BytesPipe { | ||||||
|  | 	bp := &BytesPipe{} | ||||||
|  | 	bp.buf = append(bp.buf, getBuffer(minCap)) | ||||||
|  | 	bp.wait = sync.NewCond(&bp.mu) | ||||||
|  | 	return bp | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Write writes p to BytesPipe.
 | ||||||
|  | // It can allocate new []byte slices in a process of writing.
 | ||||||
|  | func (bp *BytesPipe) Write(p []byte) (int, error) { | ||||||
|  | 	bp.mu.Lock() | ||||||
|  | 
 | ||||||
|  | 	written := 0 | ||||||
|  | loop0: | ||||||
|  | 	for { | ||||||
|  | 		if bp.closeErr != nil { | ||||||
|  | 			bp.mu.Unlock() | ||||||
|  | 			return written, ErrClosed | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if len(bp.buf) == 0 { | ||||||
|  | 			bp.buf = append(bp.buf, getBuffer(64)) | ||||||
|  | 		} | ||||||
|  | 		// get the last buffer
 | ||||||
|  | 		b := bp.buf[len(bp.buf)-1] | ||||||
|  | 
 | ||||||
|  | 		n, err := b.Write(p) | ||||||
|  | 		written += n | ||||||
|  | 		bp.bufLen += n | ||||||
|  | 
 | ||||||
|  | 		// errBufferFull is an error we expect to get if the buffer is full
 | ||||||
|  | 		if err != nil && err != errBufferFull { | ||||||
|  | 			bp.wait.Broadcast() | ||||||
|  | 			bp.mu.Unlock() | ||||||
|  | 			return written, err | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// if there was enough room to write all then break
 | ||||||
|  | 		if len(p) == n { | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// more data: write to the next slice
 | ||||||
|  | 		p = p[n:] | ||||||
|  | 
 | ||||||
|  | 		// make sure the buffer doesn't grow too big from this write
 | ||||||
|  | 		for bp.bufLen >= blockThreshold { | ||||||
|  | 			bp.wait.Wait() | ||||||
|  | 			if bp.closeErr != nil { | ||||||
|  | 				continue loop0 | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// add new byte slice to the buffers slice and continue writing
 | ||||||
|  | 		nextCap := b.Cap() * 2 | ||||||
|  | 		if nextCap > maxCap { | ||||||
|  | 			nextCap = maxCap | ||||||
|  | 		} | ||||||
|  | 		bp.buf = append(bp.buf, getBuffer(nextCap)) | ||||||
|  | 	} | ||||||
|  | 	bp.wait.Broadcast() | ||||||
|  | 	bp.mu.Unlock() | ||||||
|  | 	return written, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CloseWithError causes further reads from a BytesPipe to return immediately.
 | ||||||
|  | func (bp *BytesPipe) CloseWithError(err error) error { | ||||||
|  | 	bp.mu.Lock() | ||||||
|  | 	if err != nil { | ||||||
|  | 		bp.closeErr = err | ||||||
|  | 	} else { | ||||||
|  | 		bp.closeErr = io.EOF | ||||||
|  | 	} | ||||||
|  | 	bp.wait.Broadcast() | ||||||
|  | 	bp.mu.Unlock() | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close causes further reads from a BytesPipe to return immediately.
 | ||||||
|  | func (bp *BytesPipe) Close() error { | ||||||
|  | 	return bp.CloseWithError(nil) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read reads bytes from BytesPipe.
 | ||||||
|  | // Data can be read only once.
 | ||||||
|  | func (bp *BytesPipe) Read(p []byte) (n int, err error) { | ||||||
|  | 	bp.mu.Lock() | ||||||
|  | 	if bp.bufLen == 0 { | ||||||
|  | 		if bp.closeErr != nil { | ||||||
|  | 			bp.mu.Unlock() | ||||||
|  | 			return 0, bp.closeErr | ||||||
|  | 		} | ||||||
|  | 		bp.wait.Wait() | ||||||
|  | 		if bp.bufLen == 0 && bp.closeErr != nil { | ||||||
|  | 			err := bp.closeErr | ||||||
|  | 			bp.mu.Unlock() | ||||||
|  | 			return 0, err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	for bp.bufLen > 0 { | ||||||
|  | 		b := bp.buf[0] | ||||||
|  | 		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
 | ||||||
|  | 		n += read | ||||||
|  | 		bp.bufLen -= read | ||||||
|  | 
 | ||||||
|  | 		if b.Len() == 0 { | ||||||
|  | 			// it's empty so return it to the pool and move to the next one
 | ||||||
|  | 			returnBuffer(b) | ||||||
|  | 			bp.buf[0] = nil | ||||||
|  | 			bp.buf = bp.buf[1:] | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		if len(p) == read { | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		p = p[read:] | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	bp.wait.Broadcast() | ||||||
|  | 	bp.mu.Unlock() | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func returnBuffer(b *fixedBuffer) { | ||||||
|  | 	b.Reset() | ||||||
|  | 	bufPoolsLock.Lock() | ||||||
|  | 	pool := bufPools[b.Cap()] | ||||||
|  | 	bufPoolsLock.Unlock() | ||||||
|  | 	if pool != nil { | ||||||
|  | 		pool.Put(b) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func getBuffer(size int) *fixedBuffer { | ||||||
|  | 	bufPoolsLock.Lock() | ||||||
|  | 	pool, ok := bufPools[size] | ||||||
|  | 	if !ok { | ||||||
|  | 		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} | ||||||
|  | 		bufPools[size] = pool | ||||||
|  | 	} | ||||||
|  | 	bufPoolsLock.Unlock() | ||||||
|  | 	return pool.Get().(*fixedBuffer) | ||||||
|  | } | ||||||
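
To make the read-once FIFO semantics concrete, here is a small usage sketch (mine, not part of the vendored code): one goroutine writes and closes, and the reader drains until io.EOF.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // once drained, Read returns io.EOF
	}()

	buf := make([]byte, 32)
	for {
		n, err := bp.Read(buf) // blocks until data arrives or the pipe closes
		if n > 0 {
			fmt.Print(string(buf[:n]))
		}
		if err != nil {
			break
		}
	}
}
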
|  | @ -0,0 +1,162 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 	"io/ioutil" | ||||||
|  | 	"os" | ||||||
|  | 	"path/filepath" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
 | ||||||
|  | // temporary file and closing it atomically renames the temporary file to the
 | ||||||
|  | // destination path. Writing and closing concurrently is not allowed.
 | ||||||
|  | func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { | ||||||
|  | 	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	abspath, err := filepath.Abs(filename) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return &atomicFileWriter{ | ||||||
|  | 		f:    f, | ||||||
|  | 		fn:   abspath, | ||||||
|  | 		perm: perm, | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AtomicWriteFile atomically writes data to a file named by filename.
 | ||||||
|  | func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { | ||||||
|  | 	f, err := NewAtomicFileWriter(filename, perm) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	n, err := f.Write(data) | ||||||
|  | 	if err == nil && n < len(data) { | ||||||
|  | 		err = io.ErrShortWrite | ||||||
|  | 		f.(*atomicFileWriter).writeErr = err | ||||||
|  | 	} | ||||||
|  | 	if err1 := f.Close(); err == nil { | ||||||
|  | 		err = err1 | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type atomicFileWriter struct { | ||||||
|  | 	f        *os.File | ||||||
|  | 	fn       string | ||||||
|  | 	writeErr error | ||||||
|  | 	perm     os.FileMode | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (w *atomicFileWriter) Write(dt []byte) (int, error) { | ||||||
|  | 	n, err := w.f.Write(dt) | ||||||
|  | 	if err != nil { | ||||||
|  | 		w.writeErr = err | ||||||
|  | 	} | ||||||
|  | 	return n, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (w *atomicFileWriter) Close() (retErr error) { | ||||||
|  | 	defer func() { | ||||||
|  | 		if retErr != nil || w.writeErr != nil { | ||||||
|  | 			os.Remove(w.f.Name()) | ||||||
|  | 		} | ||||||
|  | 	}() | ||||||
|  | 	if err := w.f.Sync(); err != nil { | ||||||
|  | 		w.f.Close() | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if err := w.f.Close(); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if err := os.Chmod(w.f.Name(), w.perm); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if w.writeErr == nil { | ||||||
|  | 		return os.Rename(w.f.Name(), w.fn) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AtomicWriteSet is used to atomically write a set
 | ||||||
|  | // of files and ensure they are visible at the same time.
 | ||||||
|  | // Must be committed to a new directory.
 | ||||||
|  | type AtomicWriteSet struct { | ||||||
|  | 	root string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewAtomicWriteSet creates a new atomic write set to
 | ||||||
|  | // atomically create a set of files. The given directory
 | ||||||
|  | // is used as the base directory for storing files before
 | ||||||
|  | // commit. If no temporary directory is given the system
 | ||||||
|  | // default is used.
 | ||||||
|  | func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { | ||||||
|  | 	td, err := ioutil.TempDir(tmpDir, "write-set-") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return &AtomicWriteSet{ | ||||||
|  | 		root: td, | ||||||
|  | 	}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteFile writes a file to the set, guaranteeing the file
 | ||||||
|  | // has been synced.
 | ||||||
|  | func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { | ||||||
|  | 	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	n, err := f.Write(data) | ||||||
|  | 	if err == nil && n < len(data) { | ||||||
|  | 		err = io.ErrShortWrite | ||||||
|  | 	} | ||||||
|  | 	if err1 := f.Close(); err == nil { | ||||||
|  | 		err = err1 | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type syncFileCloser struct { | ||||||
|  | 	*os.File | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (w syncFileCloser) Close() error { | ||||||
|  | 	err := w.File.Sync() | ||||||
|  | 	if err1 := w.File.Close(); err == nil { | ||||||
|  | 		err = err1 | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // FileWriter opens a file writer inside the set. The file
 | ||||||
|  | // should be synced and closed before calling Commit.
 | ||||||
|  | func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { | ||||||
|  | 	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	return syncFileCloser{f}, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Cancel cancels the set and removes all temporary data
 | ||||||
|  | // created in the set.
 | ||||||
|  | func (ws *AtomicWriteSet) Cancel() error { | ||||||
|  | 	return os.RemoveAll(ws.root) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Commit moves all created files to the target directory. The
 | ||||||
|  | // target directory must not exist and the parent of the target
 | ||||||
|  | // directory must exist.
 | ||||||
|  | func (ws *AtomicWriteSet) Commit(target string) error { | ||||||
|  | 	return os.Rename(ws.root, target) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // String returns the location the set is writing to.
 | ||||||
|  | func (ws *AtomicWriteSet) String() string { | ||||||
|  | 	return ws.root | ||||||
|  | } | ||||||
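
A hedged usage sketch (paths are made up for illustration): AtomicWriteFile replaces a single file via write-to-temp, fsync, rename, while AtomicWriteSet stages multiple files in a temp directory and makes them visible with one directory rename.

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Single file: written to a temp file, fsynced, then renamed into place.
	if err := ioutils.AtomicWriteFile("/tmp/config.json", []byte(`{"ok":true}`), 0644); err != nil {
		log.Fatal(err)
	}

	// A set of files that must appear together.
	ws, err := ioutils.NewAtomicWriteSet("") // "" uses the system temp dir
	if err != nil {
		log.Fatal(err)
	}
	if err := ws.WriteFile("a.txt", []byte("a"), 0644); err != nil {
		ws.Cancel()
		log.Fatal(err)
	}
	// The commit target must not exist yet; the whole directory is renamed.
	if err := ws.Commit("/tmp/bundle"); err != nil {
		log.Fatal(err)
	}
}
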
|  | @ -0,0 +1,158 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"crypto/sha256" | ||||||
|  | 	"encoding/hex" | ||||||
|  | 	"io" | ||||||
|  | 
 | ||||||
|  | 	"golang.org/x/net/context" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // ReadCloserWrapper wraps an io.Reader and implements an io.ReadCloser.
 | ||||||
|  | // It calls the given callback function when closed. It should be constructed
 | ||||||
|  | // with NewReadCloserWrapper
 | ||||||
|  | type ReadCloserWrapper struct { | ||||||
|  | 	io.Reader | ||||||
|  | 	closer func() error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close calls back the passed closer function
 | ||||||
|  | func (r *ReadCloserWrapper) Close() error { | ||||||
|  | 	return r.closer() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewReadCloserWrapper returns a new io.ReadCloser.
 | ||||||
|  | func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { | ||||||
|  | 	return &ReadCloserWrapper{ | ||||||
|  | 		Reader: r, | ||||||
|  | 		closer: closer, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type readerErrWrapper struct { | ||||||
|  | 	reader io.Reader | ||||||
|  | 	closer func() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *readerErrWrapper) Read(p []byte) (int, error) { | ||||||
|  | 	n, err := r.reader.Read(p) | ||||||
|  | 	if err != nil { | ||||||
|  | 		r.closer() | ||||||
|  | 	} | ||||||
|  | 	return n, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewReaderErrWrapper returns a new io.Reader.
 | ||||||
|  | func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { | ||||||
|  | 	return &readerErrWrapper{ | ||||||
|  | 		reader: r, | ||||||
|  | 		closer: closer, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // HashData returns the sha256 sum of src.
 | ||||||
|  | func HashData(src io.Reader) (string, error) { | ||||||
|  | 	h := sha256.New() | ||||||
|  | 	if _, err := io.Copy(h, src); err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // OnEOFReader wraps an io.ReadCloser with a function
 | ||||||
|  | // that runs once when end of file is reached or the reader is closed.
 | ||||||
|  | type OnEOFReader struct { | ||||||
|  | 	Rc io.ReadCloser | ||||||
|  | 	Fn func() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *OnEOFReader) Read(p []byte) (n int, err error) { | ||||||
|  | 	n, err = r.Rc.Read(p) | ||||||
|  | 	if err == io.EOF { | ||||||
|  | 		r.runFunc() | ||||||
|  | 	} | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close closes the underlying reader and runs the function.
 | ||||||
|  | func (r *OnEOFReader) Close() error { | ||||||
|  | 	err := r.Rc.Close() | ||||||
|  | 	r.runFunc() | ||||||
|  | 	return err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *OnEOFReader) runFunc() { | ||||||
|  | 	if fn := r.Fn; fn != nil { | ||||||
|  | 		fn() | ||||||
|  | 		r.Fn = nil | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
 | ||||||
|  | // operations.
 | ||||||
|  | type cancelReadCloser struct { | ||||||
|  | 	cancel func() | ||||||
|  | 	pR     *io.PipeReader // Stream to read from
 | ||||||
|  | 	pW     *io.PipeWriter | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
 | ||||||
|  | // context is cancelled. The returned io.ReadCloser must be closed when it is
 | ||||||
|  | // no longer needed.
 | ||||||
|  | func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { | ||||||
|  | 	pR, pW := io.Pipe() | ||||||
|  | 
 | ||||||
|  | 	// Create a context used to signal when the pipe is closed
 | ||||||
|  | 	doneCtx, cancel := context.WithCancel(context.Background()) | ||||||
|  | 
 | ||||||
|  | 	p := &cancelReadCloser{ | ||||||
|  | 		cancel: cancel, | ||||||
|  | 		pR:     pR, | ||||||
|  | 		pW:     pW, | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	go func() { | ||||||
|  | 		_, err := io.Copy(pW, in) | ||||||
|  | 		select { | ||||||
|  | 		case <-ctx.Done(): | ||||||
|  | 			// If the context was closed, p.closeWithError
 | ||||||
|  | 			// was already called. Calling it again would
 | ||||||
|  | 			// change the error that Read returns.
 | ||||||
|  | 		default: | ||||||
|  | 			p.closeWithError(err) | ||||||
|  | 		} | ||||||
|  | 		in.Close() | ||||||
|  | 	}() | ||||||
|  | 	go func() { | ||||||
|  | 		for { | ||||||
|  | 			select { | ||||||
|  | 			case <-ctx.Done(): | ||||||
|  | 				p.closeWithError(ctx.Err()) | ||||||
|  | 			case <-doneCtx.Done(): | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	}() | ||||||
|  | 
 | ||||||
|  | 	return p | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Read wraps the Read method of the pipe that provides data from the wrapped
 | ||||||
|  | // ReadCloser.
 | ||||||
|  | func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { | ||||||
|  | 	return p.pR.Read(buf) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // closeWithError closes the wrapper and its underlying reader. It will
 | ||||||
|  | // cause future calls to Read to return err.
 | ||||||
|  | func (p *cancelReadCloser) closeWithError(err error) { | ||||||
|  | 	p.pW.CloseWithError(err) | ||||||
|  | 	p.cancel() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close closes the wrapper and its underlying reader. It will cause
 | ||||||
|  | // future calls to Read to return io.EOF.
 | ||||||
|  | func (p *cancelReadCloser) Close() error { | ||||||
|  | 	p.closeWithError(io.EOF) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
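
A sketch of NewCancelReadCloser in use (assumed setup, not from the commit): a context timeout unblocks a Read that would otherwise hang forever, returning ctx.Err().

package main

import (
	"fmt"
	"io"
	"time"

	"github.com/docker/docker/pkg/ioutils"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	slow, pw := io.Pipe() // nothing is ever written, so reads block
	defer pw.Close()

	rc := ioutils.NewCancelReadCloser(ctx, slow)
	defer rc.Close()

	_, err := rc.Read(make([]byte, 1))
	fmt.Println(err) // context deadline exceeded
}
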
|  | @ -0,0 +1,10 @@ | ||||||
|  | // +build !windows
 | ||||||
|  | 
 | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import "io/ioutil" | ||||||
|  | 
 | ||||||
|  | // TempDir on Unix systems is equivalent to ioutil.TempDir.
 | ||||||
|  | func TempDir(dir, prefix string) (string, error) { | ||||||
|  | 	return ioutil.TempDir(dir, prefix) | ||||||
|  | } | ||||||
|  | @ -0,0 +1,16 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io/ioutil" | ||||||
|  | 
 | ||||||
|  | 	"github.com/docker/docker/pkg/longpath" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
 | ||||||
|  | func TempDir(dir, prefix string) (string, error) { | ||||||
|  | 	tempDir, err := ioutil.TempDir(dir, prefix) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 	return longpath.AddPrefix(tempDir), nil | ||||||
|  | } | ||||||
|  | @ -0,0 +1,92 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"io" | ||||||
|  | 	"sync" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // WriteFlusher wraps the Write and Flush operations, ensuring that every write
 | ||||||
|  | // is a flush. In addition, the Close method can be called to intercept
 | ||||||
|  | // Read/Write calls if the target's lifecycle has already ended.
 | ||||||
|  | type WriteFlusher struct { | ||||||
|  | 	w           io.Writer | ||||||
|  | 	flusher     flusher | ||||||
|  | 	flushed     chan struct{} | ||||||
|  | 	flushedOnce sync.Once | ||||||
|  | 	closed      chan struct{} | ||||||
|  | 	closeLock   sync.Mutex | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type flusher interface { | ||||||
|  | 	Flush() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | var errWriteFlusherClosed = io.EOF | ||||||
|  | 
 | ||||||
|  | func (wf *WriteFlusher) Write(b []byte) (n int, err error) { | ||||||
|  | 	select { | ||||||
|  | 	case <-wf.closed: | ||||||
|  | 		return 0, errWriteFlusherClosed | ||||||
|  | 	default: | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	n, err = wf.w.Write(b) | ||||||
|  | 	wf.Flush() // every write is a flush.
 | ||||||
|  | 	return n, err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Flush the stream immediately.
 | ||||||
|  | func (wf *WriteFlusher) Flush() { | ||||||
|  | 	select { | ||||||
|  | 	case <-wf.closed: | ||||||
|  | 		return | ||||||
|  | 	default: | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	wf.flushedOnce.Do(func() { | ||||||
|  | 		close(wf.flushed) | ||||||
|  | 	}) | ||||||
|  | 	wf.flusher.Flush() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Flushed reports whether the stream has been flushed:
 | ||||||
|  | // true if it has, false otherwise.
 | ||||||
|  | func (wf *WriteFlusher) Flushed() bool { | ||||||
|  | 	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
 | ||||||
|  | // be used to detect whether or not a response code has been issued.
 | ||||||
|  | 	// Another hook should be used instead.
 | ||||||
|  | 	var flushed bool | ||||||
|  | 	select { | ||||||
|  | 	case <-wf.flushed: | ||||||
|  | 		flushed = true | ||||||
|  | 	default: | ||||||
|  | 	} | ||||||
|  | 	return flushed | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Close closes the write flusher, disallowing any further writes to the
 | ||||||
|  | // target. After the flusher is closed, all calls to write or flush will
 | ||||||
|  | // result in an error.
 | ||||||
|  | func (wf *WriteFlusher) Close() error { | ||||||
|  | 	wf.closeLock.Lock() | ||||||
|  | 	defer wf.closeLock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	select { | ||||||
|  | 	case <-wf.closed: | ||||||
|  | 		return errWriteFlusherClosed | ||||||
|  | 	default: | ||||||
|  | 		close(wf.closed) | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewWriteFlusher returns a new WriteFlusher.
 | ||||||
|  | func NewWriteFlusher(w io.Writer) *WriteFlusher { | ||||||
|  | 	var fl flusher | ||||||
|  | 	if f, ok := w.(flusher); ok { | ||||||
|  | 		fl = f | ||||||
|  | 	} else { | ||||||
|  | 		fl = &NopFlusher{} | ||||||
|  | 	} | ||||||
|  | 	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} | ||||||
|  | } | ||||||
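
In practice the writer handed to NewWriteFlusher is often an http.ResponseWriter, whose Flush method satisfies the unexported flusher interface; otherwise the NopFlusher fallback is used. A hedged sketch of streaming progress lines to a client:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func handler(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w) // picks up w's Flush() if it has one
	defer wf.Close()

	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "step %d\n", i) // each Write is flushed to the client
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/progress", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
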
|  | @ -0,0 +1,66 @@ | ||||||
|  | package ioutils // import "github.com/docker/docker/pkg/ioutils"
 | ||||||
|  | 
 | ||||||
|  | import "io" | ||||||
|  | 
 | ||||||
|  | // NopWriter represents a type whose Write operation is a no-op.
 | ||||||
|  | type NopWriter struct{} | ||||||
|  | 
 | ||||||
|  | func (*NopWriter) Write(buf []byte) (int, error) { | ||||||
|  | 	return len(buf), nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type nopWriteCloser struct { | ||||||
|  | 	io.Writer | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (w *nopWriteCloser) Close() error { return nil } | ||||||
|  | 
 | ||||||
|  | // NopWriteCloser returns a nopWriteCloser.
 | ||||||
|  | func NopWriteCloser(w io.Writer) io.WriteCloser { | ||||||
|  | 	return &nopWriteCloser{w} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NopFlusher represents a type whose Flush operation is a no-op.
 | ||||||
|  | type NopFlusher struct{} | ||||||
|  | 
 | ||||||
|  | // Flush is a no-op.
 | ||||||
|  | func (f *NopFlusher) Flush() {} | ||||||
|  | 
 | ||||||
|  | type writeCloserWrapper struct { | ||||||
|  | 	io.Writer | ||||||
|  | 	closer func() error | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *writeCloserWrapper) Close() error { | ||||||
|  | 	return r.closer() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewWriteCloserWrapper returns a new io.WriteCloser.
 | ||||||
|  | func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { | ||||||
|  | 	return &writeCloserWrapper{ | ||||||
|  | 		Writer: r, | ||||||
|  | 		closer: closer, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WriteCounter wraps a concrete io.Writer and holds a count of the number
 | ||||||
|  | // of bytes written to the writer during a "session".
 | ||||||
|  | // This can be convenient when the write return value is masked
 | ||||||
|  | // (e.g., json.Encoder.Encode()).
 | ||||||
|  | type WriteCounter struct { | ||||||
|  | 	Count  int64 | ||||||
|  | 	Writer io.Writer | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewWriteCounter returns a new WriteCounter.
 | ||||||
|  | func NewWriteCounter(w io.Writer) *WriteCounter { | ||||||
|  | 	return &WriteCounter{ | ||||||
|  | 		Writer: w, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (wc *WriteCounter) Write(p []byte) (count int, err error) { | ||||||
|  | 	count, err = wc.Writer.Write(p) | ||||||
|  | 	wc.Count += int64(count) | ||||||
|  | 	return | ||||||
|  | } | ||||||
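
For example (a sketch, not part of the commit), WriteCounter recovers the byte count that json.Encoder.Encode hides:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	wc := ioutils.NewWriteCounter(ioutil.Discard)
	enc := json.NewEncoder(wc)

	// Encode masks the byte count; the counter records it as data passes through.
	if err := enc.Encode(map[string]int{"a": 1, "b": 2}); err != nil {
		panic(err)
	}
	fmt.Println("bytes written:", wc.Count)
}
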
|  | @ -0,0 +1,26 @@ | ||||||
|  | // Package longpath introduces constants and helper functions for handling long paths
 | ||||||
|  | // in Windows, which are expected to be prepended with `\\?\` and followed by either
 | ||||||
|  | // a drive letter, a UNC server\share, or a volume identifier.
 | ||||||
|  | 
 | ||||||
|  | package longpath // import "github.com/docker/docker/pkg/longpath"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Prefix is the longpath prefix for Windows file paths.
 | ||||||
|  | const Prefix = `\\?\` | ||||||
|  | 
 | ||||||
|  | // AddPrefix will add the Windows long path prefix to the path provided if
 | ||||||
|  | // it does not already have it.
 | ||||||
|  | func AddPrefix(path string) string { | ||||||
|  | 	if !strings.HasPrefix(path, Prefix) { | ||||||
|  | 		if strings.HasPrefix(path, `\\`) { | ||||||
|  | 			// This is a UNC path, so we need to add 'UNC' to the path as well.
 | ||||||
|  | 			path = Prefix + `UNC` + path[1:] | ||||||
|  | 		} else { | ||||||
|  | 			path = Prefix + path | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return path | ||||||
|  | } | ||||||
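
Concretely (illustrative values), AddPrefix leaves already-prefixed paths untouched and rewrites UNC paths into the \\?\UNC\ form:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\Temp\foo`))        // \\?\C:\Temp\foo
	fmt.Println(longpath.AddPrefix(`\\server\share\foo`)) // \\?\UNC\server\share\foo
	fmt.Println(longpath.AddPrefix(`\\?\C:\Temp\foo`))    // unchanged
}
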
|  | @ -0,0 +1,137 @@ | ||||||
|  | // Package pools provides a collection of pools which provide various
 | ||||||
|  | // data types with buffers. These can be used to lower the number of
 | ||||||
|  | // memory allocations and reuse buffers.
 | ||||||
|  | //
 | ||||||
|  | // New pools should be added to this package to allow them to be
 | ||||||
|  | // shared across packages.
 | ||||||
|  | //
 | ||||||
|  | // Utility functions which operate on pools should be added to this
 | ||||||
|  | // package to allow them to be reused.
 | ||||||
|  | package pools // import "github.com/docker/docker/pkg/pools"
 | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bufio" | ||||||
|  | 	"io" | ||||||
|  | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"github.com/docker/docker/pkg/ioutils" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | const buffer32K = 32 * 1024 | ||||||
|  | 
 | ||||||
|  | var ( | ||||||
|  | 	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
 | ||||||
|  | 	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) | ||||||
|  | 	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
 | ||||||
|  | 	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) | ||||||
|  | 	buffer32KPool      = newBufferPoolWithSize(buffer32K) | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // BufioReaderPool is a pool of bufio.Readers backed by sync.Pool.
 | ||||||
|  | type BufioReaderPool struct { | ||||||
|  | 	pool sync.Pool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newBufioReaderPoolWithSize is unexported because new pools should be
 | ||||||
|  | // added here to be shared where required.
 | ||||||
|  | func newBufioReaderPoolWithSize(size int) *BufioReaderPool { | ||||||
|  | 	return &BufioReaderPool{ | ||||||
|  | 		pool: sync.Pool{ | ||||||
|  | 			New: func() interface{} { return bufio.NewReaderSize(nil, size) }, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
 | ||||||
|  | func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { | ||||||
|  | 	buf := bufPool.pool.Get().(*bufio.Reader) | ||||||
|  | 	buf.Reset(r) | ||||||
|  | 	return buf | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Put puts the bufio.Reader back into the pool.
 | ||||||
|  | func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { | ||||||
|  | 	b.Reset(nil) | ||||||
|  | 	bufPool.pool.Put(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | type bufferPool struct { | ||||||
|  | 	pool sync.Pool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func newBufferPoolWithSize(size int) *bufferPool { | ||||||
|  | 	return &bufferPool{ | ||||||
|  | 		pool: sync.Pool{ | ||||||
|  | 			New: func() interface{} { return make([]byte, size) }, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bp *bufferPool) Get() []byte { | ||||||
|  | 	return bp.pool.Get().([]byte) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (bp *bufferPool) Put(b []byte) { | ||||||
|  | 	bp.pool.Put(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
 | ||||||
|  | func Copy(dst io.Writer, src io.Reader) (written int64, err error) { | ||||||
|  | 	buf := buffer32KPool.Get() | ||||||
|  | 	written, err = io.CopyBuffer(dst, src, buf) | ||||||
|  | 	buffer32KPool.Put(buf) | ||||||
|  | 	return | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
 | ||||||
|  | // into the pool and closes the reader if it's an io.ReadCloser.
 | ||||||
|  | func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { | ||||||
|  | 	return ioutils.NewReadCloserWrapper(r, func() error { | ||||||
|  | 		if readCloser, ok := r.(io.ReadCloser); ok { | ||||||
|  | 			readCloser.Close() | ||||||
|  | 		} | ||||||
|  | 		bufPool.Put(buf) | ||||||
|  | 		return nil | ||||||
|  | 	}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // BufioWriterPool is a pool of bufio.Writers backed by sync.Pool.
 | ||||||
|  | type BufioWriterPool struct { | ||||||
|  | 	pool sync.Pool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // newBufioWriterPoolWithSize is unexported because new pools should be
 | ||||||
|  | // added here to be shared where required.
 | ||||||
|  | func newBufioWriterPoolWithSize(size int) *BufioWriterPool { | ||||||
|  | 	return &BufioWriterPool{ | ||||||
|  | 		pool: sync.Pool{ | ||||||
|  | 			New: func() interface{} { return bufio.NewWriterSize(nil, size) }, | ||||||
|  | 		}, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
 | ||||||
|  | func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { | ||||||
|  | 	buf := bufPool.pool.Get().(*bufio.Writer) | ||||||
|  | 	buf.Reset(w) | ||||||
|  | 	return buf | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Put puts the bufio.Writer back into the pool.
 | ||||||
|  | func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { | ||||||
|  | 	b.Reset(nil) | ||||||
|  | 	bufPool.pool.Put(b) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
 | ||||||
|  | // into the pool and closes the writer if it's an io.WriteCloser.
 | ||||||
|  | func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { | ||||||
|  | 	return ioutils.NewWriteCloserWrapper(w, func() error { | ||||||
|  | 		buf.Flush() | ||||||
|  | 		if writeCloser, ok := w.(io.WriteCloser); ok { | ||||||
|  | 			writeCloser.Close() | ||||||
|  | 		} | ||||||
|  | 		bufPool.Put(buf) | ||||||
|  | 		return nil | ||||||
|  | 	}) | ||||||
|  | } | ||||||
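
A usage sketch (mine): pools.Copy borrows a pooled 32K buffer for io.CopyBuffer, and BufioReader32KPool hands out bufio.Readers that are reset onto the given source and returned with Put.

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	// Pooled copy: borrows a 32K []byte instead of allocating one per call.
	var dst bytes.Buffer
	n, err := pools.Copy(&dst, strings.NewReader("some payload"))
	fmt.Println(n, err)

	// Pooled bufio.Reader: Get resets a pooled reader onto src; Put returns it.
	src := strings.NewReader("buffered read")
	br := pools.BufioReader32KPool.Get(src)
	word, _ := br.ReadString(' ')
	fmt.Printf("%q\n", word)
	pools.BufioReader32KPool.Put(br)
}
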