diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e845e459a5c1..d168ad280a3e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 1.6.0 (2015-04-07) + +#### Builder ++ Building images from an image ID ++ Build containers with resource constraints, ie `docker build --cpu-shares=100 --memory=1024m...` ++ `commit --change` to apply specified Dockerfile instructions while committing the image ++ `import --change` to apply specified Dockerfile instructions while importing the image ++ Builds no longer continue in the background when canceled with CTRL-C + +#### Client ++ Windows Support + +#### Runtime ++ Container and image Labels ++ `--cgroup-parent` for specifying a parent cgroup to place container cgroup within ++ Logging drivers, `json-file`, `syslog`, or `none` ++ Pulling images by ID ++ `--ulimit` to set the ulimit on a container ++ `--default-ulimit` option on the daemon which applies to all created containers (and overwritten by `--ulimit` on run) + ## 1.5.0 (2015-02-10) #### Builder diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e6bf6ad5f3cab..395f25923409d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -164,7 +164,7 @@ However, there might be a way to implement that feature *on top of* Docker. Stack Overflow Stack Overflow has over 7000K Docker questions listed. We regularly - monitor Docker questions + monitor Docker questions and so do many other knowledgeable Docker users. 
diff --git a/Dockerfile b/Dockerfile index b54fda561407e..6be471a7bb5f4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -98,12 +98,19 @@ RUN cd /usr/local/go/src \ ./make.bash --no-clean 2>&1; \ done -# We still support compiling with older Go, so need to grab older "gofmt" -ENV GOFMT_VERSION 1.3.3 -RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt +# This has been commented out and kept as reference because we don't support compiling with older Go anymore. +# ENV GOFMT_VERSION 1.3.3 +# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt +# Update this sha when we upgrade to go 1.5.0 +ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 # Grab Go's cover tool for dead-simple code coverage testing -RUN go get golang.org/x/tools/cmd/cover +# Grab Go's vet tool for examining go code to find suspicious constructs +# and help prevent errors that the compiler might not catch +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ + && go install -v golang.org/x/tools/cmd/cover \ + && go install -v golang.org/x/tools/cmd/vet # TODO replace FPM with some very minimal debhelper stuff RUN gem install --no-rdoc --no-ri fpm --version 1.3.2 diff --git a/LICENSE b/LICENSE index 508036ef4f318..c7a3f0cfd4562 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -182,7 +182,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/MAINTAINERS b/MAINTAINERS index e0ddc9f1a5d90..5c02fa671be1c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -37,7 +37,7 @@ project from a great one. text = """ Docker follows the timeless, highly efficient and totally unfair system known as [Benevolent dictator for -life](http://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with +life](https://en.wikipedia.org/wiki/Benevolent_Dictator_for_Life), with yours truly, Solomon Hykes, in the role of BDFL. This means that all decisions are made, by default, by Solomon. Since making every decision myself would be highly un-scalable, in practice decisions are spread @@ -408,7 +408,9 @@ made through a pull request. people = [ "fredlf", "james", - "mary", + "moxiegirl", + "thaJeztah", + "jamtur01", "spf13", "sven" ] @@ -585,7 +587,7 @@ made through a pull request. Email = "lk4d4@docker.com" GitHub = "lk4d4" - [people.mary] + [people.moxiegirl] Name = "Mary Anthony" Email = "mary.anthony@docker.com" GitHub = "moxiegirl" diff --git a/Makefile b/Makefile index 9bf1b16c9456c..b60b2a4d0004d 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,7 @@ test-docker-py: build $(DOCKER_RUN_DOCKER) hack/make.sh binary test-docker-py validate: build - $(DOCKER_RUN_DOCKER) hack/make.sh validate-gofmt validate-dco validate-toml + $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-gofmt validate-test validate-toml validate-vet shell: build $(DOCKER_RUN_DOCKER) bash diff --git a/NOTICE b/NOTICE index 8e84d0f3b2337..6e6f469ab9b28 100644 --- a/NOTICE +++ b/NOTICE @@ -1,7 +1,7 @@ Docker Copyright 2012-2015 Docker, Inc. -This product includes software developed at Docker, Inc. (http://www.docker.com). +This product includes software developed at Docker, Inc. 
(https://www.docker.com). This product contains software (https://github.com/kr/pty) developed by Keith Rarick, licensed under the MIT License. @@ -14,6 +14,6 @@ United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. -For more information, please see http://www.bis.doc.gov +For more information, please see https://www.bis.doc.gov -See also http://www.apache.org/dev/crypto.html and/or seek legal counsel. +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/README.md b/README.md index 6d259c5edbcce..5603a55a7f178 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ databases, and backend services without depending on a particular stack or provider. Docker began as an open-source implementation of the deployment engine which -powers [dotCloud](http://dotcloud.com), a popular Platform-as-a-Service. +powers [dotCloud](https://dotcloud.com), a popular Platform-as-a-Service. It benefits directly from the experience accumulated over several years of large-scale operation and support of hundreds of thousands of applications and databases. @@ -56,12 +56,12 @@ By contrast, Docker relies on a different sandboxing method known as *containerization*. Unlike traditional virtualization, containerization takes place at the kernel level. Most modern operating system kernels now support the primitives necessary for containerization, including -Linux with [openvz](http://openvz.org), +Linux with [openvz](https://openvz.org), [vserver](http://linux-vserver.org) and more recently [lxc](http://lxc.sourceforge.net), Solaris with -[zones](http://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), +[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), and FreeBSD with -[Jails](http://www.freebsd.org/doc/handbook/jails.html). +[Jails](https://www.freebsd.org/doc/handbook/jails.html). 
Docker builds on top of these low-level primitives to offer developers a portable format and runtime environment that solves all four problems. @@ -115,7 +115,7 @@ This is usually difficult for several reasons: Docker solves the problem of dependency hell by giving the developer a simple way to express *all* their application's dependencies in one place, while streamlining the process of assembling them. If this makes you think of -[XKCD 927](http://xkcd.com/927/), don't worry. Docker doesn't +[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't *replace* your favorite packaging systems. It simply orchestrates their use in a simple and repeatable way. How does it do that? With layers. @@ -147,10 +147,10 @@ Docker can be installed on your local machine as well as servers - both bare metal and virtualized. It is available as a binary on most modern Linux systems, or as a VM on Windows, Mac and other systems. -We also offer an [interactive tutorial](http://www.docker.com/tryit/) +We also offer an [interactive tutorial](https://www.docker.com/tryit/) for quickly learning the basics of using Docker. -For up-to-date install instructions, see the [Docs](http://docs.docker.com). +For up-to-date install instructions, see the [Docs](https://docs.docker.com). Usage examples ============== @@ -159,7 +159,7 @@ Docker can be used to run short-lived commands, long-running daemons (app servers, databases, etc.), interactive shell sessions, etc. You can find a [list of real-world -examples](http://docs.docker.com/examples/) in the +examples](https://docs.docker.com/examples/) in the documentation. 
Under the hood @@ -172,14 +172,14 @@ Under the hood, Docker is built on the following components: and [namespacing](http://blog.dotcloud.com/under-the-hood-linux-kernels-on-dotcloud-part) capabilities of the Linux kernel -* The [Go](http://golang.org) programming language +* The [Go](https://golang.org) programming language * The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) * The [Libcontainer Specification](https://github.com/docker/libcontainer/blob/master/SPEC.md) Contributing to Docker ====================== -[![GoDoc](https://godoc.org/github.com/docker/docker?status.png)](https://godoc.org/github.com/docker/docker) +[![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) [![Jenkins Build Status](https://jenkins.dockerproject.com/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.com/job/Docker%20Master/) Want to hack on Docker? Awesome! We have [instructions to help you get @@ -218,7 +218,7 @@ United States and other governments. It is your responsibility to ensure that your use and/or transfer does not violate applicable laws. -For more information, please see http://www.bis.doc.gov +For more information, please see https://www.bis.doc.gov Licensing diff --git a/VERSION b/VERSION index 59b9db0c75150..de023c91b16b6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.5.0-dev +1.7.0-dev diff --git a/api/client/attach.go b/api/client/attach.go index 48cb8b4478638..8ab3248aceb08 100644 --- a/api/client/attach.go +++ b/api/client/attach.go @@ -1,15 +1,15 @@ package client import ( + "encoding/json" "fmt" "io" "net/url" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/utils" ) // CmdAttach attaches to a running container. 
@@ -31,25 +31,20 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return err } - env := engine.Env{} - if err := env.Decode(stream); err != nil { + var c types.ContainerJSON + if err := json.NewDecoder(stream).Decode(&c); err != nil { return err } - if !env.GetSubEnv("State").GetBool("Running") { + if !c.State.Running { return fmt.Errorf("You cannot attach to a stopped container, start it first") } - var ( - config = env.GetSubEnv("Config") - tty = config.GetBool("Tty") - ) - - if err := cli.CheckTtyInput(!*noStdin, tty); err != nil { + if err := cli.CheckTtyInput(!*noStdin, c.Config.Tty); err != nil { return err } - if tty && cli.isTerminalOut { + if c.Config.Tty && cli.isTerminalOut { if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { logrus.Debugf("Error monitoring TTY size: %s", err) } @@ -59,7 +54,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { v := url.Values{} v.Set("stream", "1") - if !*noStdin && config.GetBool("OpenStdin") { + if !*noStdin && c.Config.OpenStdin { v.Set("stdin", "1") in = cli.in } @@ -67,12 +62,12 @@ func (cli *DockerCli) CmdAttach(args ...string) error { v.Set("stdout", "1") v.Set("stderr", "1") - if *proxy && !tty { + if *proxy && !c.Config.Tty { sigc := cli.forwardAllSignals(cmd.Arg(0)) defer signal.StopCatch(sigc) } - if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil { + if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), c.Config.Tty, in, cli.out, cli.err, nil, nil); err != nil { return err } @@ -81,7 +76,7 @@ func (cli *DockerCli) CmdAttach(args ...string) error { return err } if status != 0 { - return &utils.StatusError{StatusCode: status} + return StatusError{StatusCode: status} } return nil diff --git a/api/client/build.go b/api/client/build.go index f1bceb4a161a3..e83de976beb18 100644 --- a/api/client/build.go +++ b/api/client/build.go @@ -55,7 +55,9 @@ func (cli *DockerCli) CmdBuild(args 
...string) error { flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") + flCpuQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flCPUSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCPUSetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) @@ -94,20 +96,11 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } else { root := cmd.Arg(0) if urlutil.IsGitURL(root) { - remoteURL := cmd.Arg(0) - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - - root, err = ioutil.TempDir("", "docker-build-git") + root, err = utils.GitClone(root) if err != nil { return err } defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } } if _, err := os.Stat(root); err != nil { return err @@ -180,7 +173,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { includes = append(includes, ".dockerignore", *dockerfileName) } - if err = utils.ValidateContextDirectory(root, excludes); err != nil { + if err := utils.ValidateContextDirectory(root, excludes); err != nil { return fmt.Errorf("Error checking context is accessible: '%s'. 
Please check permissions and try again.", err) } options := &archive.TarOptions{ @@ -278,16 +271,16 @@ func (cli *DockerCli) CmdBuild(args ...string) error { } v.Set("cpusetcpus", *flCPUSetCpus) + v.Set("cpusetmems", *flCPUSetMems) v.Set("cpushares", strconv.FormatInt(*flCPUShares, 10)) + v.Set("cpuquota", strconv.FormatInt(*flCpuQuota, 10)) v.Set("memory", strconv.FormatInt(memory, 10)) v.Set("memswap", strconv.FormatInt(memorySwap, 10)) v.Set("dockerfile", *dockerfileName) - cli.LoadConfigFile() - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(cli.configFile) + buf, err := json.Marshal(cli.configFile.AuthConfigs) if err != nil { return err } @@ -302,7 +295,7 @@ func (cli *DockerCli) CmdBuild(args ...string) error { if jerr.Code == 0 { jerr.Code = 1 } - return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} + return StatusError{Status: jerr.Message, StatusCode: jerr.Code} } return err } diff --git a/api/client/checkpoint.go b/api/client/checkpoint.go new file mode 100644 index 0000000000000..3129c9b6c6cac --- /dev/null +++ b/api/client/checkpoint.go @@ -0,0 +1,50 @@ +package client + +import ( + "fmt" + + "github.com/docker/libcontainer" +) + +func (cli *DockerCli) CmdCheckpoint(args ...string) error { + cmd := cli.Subcmd("checkpoint", "CONTAINER [CONTAINER...]", "Checkpoint one or more running containers", true) + + var ( + flImgDir = cmd.String([]string{"-image-dir"}, "", "(optional) directory for storing checkpoint image files") + flWorkDir = cmd.String([]string{"-work-dir"}, "", "directory for storing log file") + flLeaveRunning = cmd.Bool([]string{"-leave-running"}, false, "leave the container running after checkpointing") + flCheckTcp = cmd.Bool([]string{"-allow-tcp"}, false, "allow checkpointing established tcp connections") + flExtUnix = cmd.Bool([]string{"-allow-ext-unix"}, false, "allow checkpointing external unix connections") + flShell = cmd.Bool([]string{"-allow-shell"}, false, "allow checkpointing shell 
jobs") + ) + + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + criuOpts := &libcontainer.CriuOpts{ + ImagesDirectory: *flImgDir, + WorkDirectory: *flWorkDir, + LeaveRunning: *flLeaveRunning, + TcpEstablished: *flCheckTcp, + ExternalUnixConnections: *flExtUnix, + ShellJob: *flShell, + } + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/checkpoint", criuOpts, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to checkpoint one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} diff --git a/api/client/cli.go b/api/client/cli.go index 01b5f2e63f26a..600d4cc5a3abb 100644 --- a/api/client/cli.go +++ b/api/client/cli.go @@ -9,36 +9,51 @@ import ( "net" "net/http" "os" + "path/filepath" "reflect" "strings" "text/template" "time" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/homedir" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" ) +// DockerCli represents the docker command line client. +// Instances of the client can be returned from NewDockerCli. type DockerCli struct { - proto string - addr string - configFile *registry.ConfigFile - in io.ReadCloser - out io.Writer - err io.Writer - keyFile string - tlsConfig *tls.Config - scheme string - // inFd holds file descriptor of the client's STDIN, if it's a valid file + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + + // configFile has the client configuration file + configFile *cliconfig.ConfigFile + // in holds the input stream and closer (io.ReadCloser) for the client. + in io.ReadCloser + // out holds the output stream (io.Writer) for the client. 
+ out io.Writer + // err holds the error stream (io.Writer) for the client. + err io.Writer + // keyFile holds the key file as a string. + keyFile string + // tlsConfig holds the TLS configuration for the client, and will + // set the scheme to https in NewDockerCli if present. + tlsConfig *tls.Config + // scheme holds the scheme of the client i.e. https. + scheme string + // inFd holds the file descriptor of the client's STDIN (if valid). inFd uintptr - // outFd holds file descriptor of the client's STDOUT, if it's a valid file + // outFd holds file descriptor of the client's STDOUT (if valid). outFd uintptr - // isTerminalIn describes if client's STDIN is a TTY + // isTerminalIn indicates whether the client's STDIN is a TTY isTerminalIn bool - // isTerminalOut describes if client's STDOUT is a TTY + // isTerminalOut dindicates whether the client's STDOUT is a TTY isTerminalOut bool - transport *http.Transport + // transport holds the client transport instance. + transport *http.Transport } var funcMap = template.FuncMap{ @@ -83,6 +98,11 @@ func (cli *DockerCli) Cmd(args ...string) error { return cli.CmdHelp() } +// Subcmd is a subcommand of the main "docker" command. +// A subcommand represents an action that can be performed +// from the Docker command line client. +// +// To see all available subcommands, run "docker --help". func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet { var errorHandling flag.ErrorHandling if exitOnError { @@ -107,14 +127,8 @@ func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bo return flags } -func (cli *DockerCli) LoadConfigFile() (err error) { - cli.configFile, err = registry.LoadConfig(homedir.Get()) - if err != nil { - fmt.Fprintf(cli.err, "WARNING: %s\n", err) - } - return err -} - +// CheckTtyInput checks if we are trying to attach to a container tty +// from a non-tty client input stream, and if so, returns an error. 
func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { // In order to attach to a container tty, input stream for the client must // be a tty itself: redirecting or piping the client standard input is @@ -125,6 +139,10 @@ func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { return nil } +// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. +// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config +// is set the client scheme will be set to https. +// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( inFd uintptr @@ -149,15 +167,15 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a err = out } - // The transport is created here for reuse during the client session + // The transport is created here for reuse during the client session. tr := &http.Transport{ TLSClientConfig: tlsConfig, } - // Why 32? See issue 8035 + // Why 32? See https://github.com/docker/docker/pull/8035. timeout := 32 * time.Second if proto == "unix" { - // no need in compressing for local communications + // No need for compression in local communications. 
tr.DisableCompression = true tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) @@ -167,9 +185,15 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, a tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } + configFile, e := cliconfig.Load(filepath.Join(homedir.Get(), ".docker")) + if e != nil { + fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) + } + return &DockerCli{ proto: proto, addr: addr, + configFile: configFile, in: in, out: out, err: err, diff --git a/api/client/client.go b/api/client/client.go index 4cfce5f6842cb..31708817441c6 100644 --- a/api/client/client.go +++ b/api/client/client.go @@ -3,3 +3,15 @@ // Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. // See https://docs.docker.com/installation/ for instructions on installing Docker. package client + +import "fmt" + +// An StatusError reports an unsuccessful exit by a command. 
+type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/api/client/cp.go b/api/client/cp.go index f32e55187fa49..392e362929e70 100644 --- a/api/client/cp.go +++ b/api/client/cp.go @@ -21,7 +21,8 @@ func (cli *DockerCli) CmdCp(args ...string) error { cmd.ParseFlags(args, true) - info := strings.Split(cmd.Arg(0), ":") + // deal with path name with `:` + info := strings.SplitN(cmd.Arg(0), ":", 2) if len(info) != 2 { return fmt.Errorf("Error: Path not specified") diff --git a/api/client/create.go b/api/client/create.go index bb84d5e4638ad..b0819a05d7043 100644 --- a/api/client/create.go +++ b/api/client/create.go @@ -37,11 +37,8 @@ func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { return err } - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) buf, err := json.Marshal(authConfig) if err != nil { return err diff --git a/api/client/diff.go b/api/client/diff.go index 3f6c28384ef73..6000c6b388f66 100644 --- a/api/client/diff.go +++ b/api/client/diff.go @@ -21,14 +21,17 @@ func (cli *DockerCli) CmdDiff(args ...string) error { cmd.Require(flag.Exact, 1) cmd.ParseFlags(args, true) + if cmd.Arg(0) == "" { + return fmt.Errorf("Container name cannot be empty") + } + rdr, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, nil) if err != nil { return err } changes := []types.ContainerChange{} - err = json.NewDecoder(rdr).Decode(&changes) - if err != nil { + if err := json.NewDecoder(rdr).Decode(&changes); err != nil { return err } diff --git a/api/client/exec.go b/api/client/exec.go index 27e6878df4302..4bf53eaec2cb4 100644 --- a/api/client/exec.go +++ b/api/client/exec.go @@ -9,7 
+9,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) // CmdExec runs a command in a running container. @@ -21,7 +20,7 @@ func (cli *DockerCli) CmdExec(args ...string) error { execConfig, err := runconfig.ParseExec(cmd, args) // just in case the ParseExec does not exit if execConfig.Container == "" || err != nil { - return &utils.StatusError{StatusCode: 1} + return StatusError{StatusCode: 1} } stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, nil) @@ -33,9 +32,6 @@ func (cli *DockerCli) CmdExec(args ...string) error { if err := json.NewDecoder(stream).Decode(&response); err != nil { return err } - for _, warning := range response.Warnings { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } execID := response.ID @@ -44,12 +40,18 @@ func (cli *DockerCli) CmdExec(args ...string) error { return nil } + //Temp struct for execStart so that we don't need to transfer all the execConfig + execStartCheck := &types.ExecStartCheck{ + Detach: execConfig.Detach, + Tty: execConfig.Tty, + } + if !execConfig.Detach { if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { return err } } else { - if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, nil)); err != nil { + if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execStartCheck, nil)); err != nil { return err } // For now don't print this - wait for when we support exec wait() @@ -122,7 +124,7 @@ func (cli *DockerCli) CmdExec(args ...string) error { } if status != 0 { - return &utils.StatusError{StatusCode: status} + return StatusError{StatusCode: status} } return nil diff --git a/api/client/export.go b/api/client/export.go index 8f1642f609da9..1ff46f9b5701d 100644 --- a/api/client/export.go +++ b/api/client/export.go @@ -3,7 +3,6 @@ package client import ( "errors" "io" - "net/url" "os" flag 
"github.com/docker/docker/pkg/mflag" @@ -34,19 +33,9 @@ func (cli *DockerCli) CmdExport(args ...string) error { return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") } - if len(cmd.Args()) == 1 { - image := cmd.Arg(0) - if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil { - return err - } - } else { - v := url.Values{} - for _, arg := range cmd.Args() { - v.Add("names", arg) - } - if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil { - return err - } + image := cmd.Arg(0) + if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil { + return err } return nil diff --git a/api/client/hijack.go b/api/client/hijack.go index 1635384168797..5f4794a5e7d39 100644 --- a/api/client/hijack.go +++ b/api/client/hijack.go @@ -142,6 +142,13 @@ func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.Rea if err != nil { return err } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.configFile.HttpHeaders { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.Header.Set("Content-Type", "text/plain") req.Header.Set("Connection", "Upgrade") diff --git a/api/client/history.go b/api/client/history.go index 6e0cdb24cde4a..31b8535031f54 100644 --- a/api/client/history.go +++ b/api/client/history.go @@ -9,8 +9,8 @@ import ( "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/units" - "github.com/docker/docker/utils" ) // CmdHistory shows the history of an image. 
@@ -18,6 +18,7 @@ import ( // Usage: docker history [OPTIONS] IMAGE func (cli *DockerCli) CmdHistory(args ...string) error { cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true) + human := cmd.Bool([]string{"H", "-human"}, true, "Print sizes and dates in human readable format") quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") cmd.Require(flag.Exact, 1) @@ -29,14 +30,13 @@ func (cli *DockerCli) CmdHistory(args ...string) error { } history := []types.ImageHistory{} - err = json.NewDecoder(rdr).Decode(&history) - if err != nil { + if err := json.NewDecoder(rdr).Decode(&history); err != nil { return err } w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") + fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") } for _, entry := range history { @@ -46,14 +46,25 @@ func (cli *DockerCli) CmdHistory(args ...string) error { fmt.Fprintf(w, stringid.TruncateID(entry.ID)) } if !*quiet { - fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0)))) + if *human { + fmt.Fprintf(w, "\t%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0)))) + } else { + fmt.Fprintf(w, "\t%s\t", time.Unix(entry.Created, 0).Format(time.RFC3339)) + } if *noTrunc { fmt.Fprintf(w, "%s\t", entry.CreatedBy) } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(entry.CreatedBy, 45)) + fmt.Fprintf(w, "%s\t", stringutils.Truncate(entry.CreatedBy, 45)) } - fmt.Fprintf(w, "%s", units.HumanSize(float64(entry.Size))) + + if *human { + fmt.Fprintf(w, "%s\t", units.HumanSize(float64(entry.Size))) + } else { + fmt.Fprintf(w, "%d\t", entry.Size) + } + + fmt.Fprintf(w, "%s", entry.Comment) } fmt.Fprintf(w, "\n") } diff --git a/api/client/images.go b/api/client/images.go index b47c6d65f4f0c..e39c473749e41 100644 --- a/api/client/images.go +++ 
b/api/client/images.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/url" - "strings" "text/tabwriter" "time" @@ -18,74 +17,6 @@ import ( "github.com/docker/docker/utils" ) -// FIXME: --viz and --tree are deprecated. Remove them in a future version. -func (cli *DockerCli) WalkTree(noTrunc bool, images []*types.Image, byParent map[string][]*types.Image, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *types.Image, prefix string)) { - length := len(images) - if length > 1 { - for index, image := range images { - if index+1 == length { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } else { - printNode(cli, noTrunc, image, prefix+"\u251C─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) - } - } - } - } else { - for _, image := range images { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.ID]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } - } -} - -// FIXME: --viz and --tree are deprecated. Remove them in a future version. -func (cli *DockerCli) printVizNode(noTrunc bool, image *types.Image, prefix string) { - var ( - imageID string - parentID string - ) - if noTrunc { - imageID = image.ID - parentID = image.ParentId - } else { - imageID = stringid.TruncateID(image.ID) - parentID = stringid.TruncateID(image.ParentId) - } - if parentID == "" { - fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) - } else { - fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) - } - if image.RepoTags[0] != ":" { - fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.RepoTags, "\\n")) - } -} - -// FIXME: --viz and --tree are deprecated. 
Remove them in a future version. -func (cli *DockerCli) printTreeNode(noTrunc bool, image *types.Image, prefix string) { - var imageID string - if noTrunc { - imageID = image.ID - } else { - imageID = stringid.TruncateID(image.ID) - } - - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.VirtualSize))) - if image.RepoTags[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", ")) - } else { - fmt.Fprint(cli.out, "\n") - } -} - // CmdImages lists the images in a specified repository, or all top-level images if no repository is specified. // // Usage: docker images [OPTIONS] [REPOSITORY] @@ -95,9 +26,6 @@ func (cli *DockerCli) CmdImages(args ...string) error { all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") - // FIXME: --viz and --tree are deprecated. Remove them in a future version. - flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") flFilter := opts.NewListOpts(nil) cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") @@ -116,158 +44,83 @@ func (cli *DockerCli) CmdImages(args ...string) error { } matchName := cmd.Arg(0) - // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
- if *flViz || *flTree { - v := url.Values{ - "all": []string{"1"}, - } - if len(imageFilterArgs) > 0 { - filterJSON, err := filters.ToParam(imageFilterArgs) - if err != nil { - return err - } - v.Set("filters", filterJSON) - } - - rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil) + v := url.Values{} + if len(imageFilterArgs) > 0 { + filterJSON, err := filters.ToParam(imageFilterArgs) if err != nil { return err } + v.Set("filters", filterJSON) + } - images := []types.Image{} - err = json.NewDecoder(rdr).Decode(&images) - if err != nil { - return err - } - - var ( - printNode func(cli *DockerCli, noTrunc bool, image *types.Image, prefix string) - startImage *types.Image - - roots = []*types.Image{} - byParent = make(map[string][]*types.Image) - ) - - for _, image := range images { - if image.ParentId == "" { - roots = append(roots, &image) - } else { - if children, exists := byParent[image.ParentId]; exists { - children = append(children, &image) - } else { - byParent[image.ParentId] = []*types.Image{&image} - } - } + if cmd.NArg() == 1 { + // FIXME rename this parameter, to not be confused with the filters flag + v.Set("filter", matchName) + } + if *all { + v.Set("all", "1") + } - if matchName != "" { - if matchName == image.ID || matchName == stringid.TruncateID(image.ID) { - startImage = &image - } + rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil) + if err != nil { + return err + } - for _, repotag := range image.RepoTags { - if repotag == matchName { - startImage = &image - } - } - } - } + images := []types.Image{} + if err := json.NewDecoder(rdr).Decode(&images); err != nil { + return err + } - if *flViz { - fmt.Fprintf(cli.out, "digraph docker {\n") - printNode = (*DockerCli).printVizNode + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) + if !*quiet { + if *showDigests { + fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } else { - printNode = (*DockerCli).printTreeNode + fmt.Fprintln(w, 
"REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") } + } - if startImage != nil { - root := []*types.Image{startImage} - cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if matchName == "" { - cli.WalkTree(*noTrunc, roots, byParent, "", printNode) - } - if *flViz { - fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") - } - } else { - v := url.Values{} - if len(imageFilterArgs) > 0 { - filterJSON, err := filters.ToParam(imageFilterArgs) - if err != nil { - return err - } - v.Set("filters", filterJSON) + for _, image := range images { + ID := image.ID + if !*noTrunc { + ID = stringid.TruncateID(ID) } - if cmd.NArg() == 1 { - // FIXME rename this parameter, to not be confused with the filters flag - v.Set("filter", matchName) - } - if *all { - v.Set("all", "1") - } + repoTags := image.RepoTags + repoDigests := image.RepoDigests - rdr, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil, nil) - if err != nil { - return err + if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { + // dangling image - clear out either repoTags or repoDigsts so we only show it once below + repoDigests = []string{} } - images := []types.Image{} - err = json.NewDecoder(rdr).Decode(&images) - if err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - if *showDigests { - fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") + // combine the tags and digests lists + tagsAndDigests := append(repoTags, repoDigests...) 
+ for _, repoAndRef := range tagsAndDigests { + repo, ref := parsers.ParseRepositoryTag(repoAndRef) + // default tag and digest to none - if there's a value, it'll be set below + tag := "" + digest := "" + if utils.DigestReference(ref) { + digest = ref } else { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } - } - - for _, image := range images { - ID := image.ID - if !*noTrunc { - ID = stringid.TruncateID(ID) - } - - repoTags := image.RepoTags - repoDigests := image.RepoDigests - - if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { - // dangling image - clear out either repoTags or repoDigsts so we only show it once below - repoDigests = []string{} + tag = ref } - // combine the tags and digests lists - tagsAndDigests := append(repoTags, repoDigests...) - for _, repoAndRef := range tagsAndDigests { - repo, ref := parsers.ParseRepositoryTag(repoAndRef) - // default tag and digest to none - if there's a value, it'll be set below - tag := "" - digest := "" - if utils.DigestReference(ref) { - digest = ref - } else { - tag = ref - } - - if !*quiet { - if *showDigests { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) - } else { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) - } + if !*quiet { + if *showDigests { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) } else { - fmt.Fprintln(w, ID) + fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, ID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(int64(image.Created), 0))), units.HumanSize(float64(image.VirtualSize))) } + } 
else { + fmt.Fprintln(w, ID) } } + } - if !*quiet { - w.Flush() - } + if !*quiet { + w.Flush() } return nil } diff --git a/api/client/info.go b/api/client/info.go index 0f509d83f9710..432ccac40fcce 100644 --- a/api/client/info.go +++ b/api/client/info.go @@ -1,12 +1,11 @@ package client import ( + "encoding/json" "fmt" "os" - "time" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/units" ) @@ -19,127 +18,74 @@ func (cli *DockerCli) CmdInfo(args ...string) error { cmd.Require(flag.Exact, 0) cmd.ParseFlags(args, false) - body, _, err := readBody(cli.call("GET", "/info", nil, nil)) + rdr, _, err := cli.call("GET", "/info", nil, nil) if err != nil { return err } - out := engine.NewOutput() - remoteInfo, err := out.AddEnv() - if err != nil { - return err + info := &types.Info{} + if err := json.NewDecoder(rdr).Decode(info); err != nil { + return fmt.Errorf("Error reading remote info: %v", err) } - if _, err := out.Write(body); err != nil { - logrus.Errorf("Error reading remote info: %s", err) - return err - } - out.Close() - - if remoteInfo.Exists("Containers") { - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - } - if remoteInfo.Exists("Images") { - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - } - if remoteInfo.Exists("Driver") { - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - } - if remoteInfo.Exists("DriverStatus") { - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err - } - for _, pair := range driverStatus { + fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) + fmt.Fprintf(cli.out, "Images: %d\n", info.Images) + fmt.Fprintf(cli.out, "Storage Driver: %s\n", info.Driver) + if info.DriverStatus != nil { + for _, pair := range info.DriverStatus { fmt.Fprintf(cli.out, " %s: %s\n", pair[0], 
pair[1]) } } - if remoteInfo.Exists("ExecutionDriver") { - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - } - if remoteInfo.Exists("LoggingDriver") { - fmt.Fprintf(cli.out, "Logging Driver: %s\n", remoteInfo.Get("LoggingDriver")) - } - if remoteInfo.Exists("KernelVersion") { - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - } - if remoteInfo.Exists("OperatingSystem") { - fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) - } - if remoteInfo.Exists("NCPU") { - fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) - } - if remoteInfo.Exists("MemTotal") { - fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) - } - if remoteInfo.Exists("Name") { - fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name")) - } - if remoteInfo.Exists("ID") { - fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) - } + fmt.Fprintf(cli.out, "Execution Driver: %s\n", info.ExecutionDriver) + fmt.Fprintf(cli.out, "Logging Driver: %s\n", info.LoggingDriver) + fmt.Fprintf(cli.out, "Kernel Version: %s\n", info.KernelVersion) + fmt.Fprintf(cli.out, "Operating System: %s\n", info.OperatingSystem) + fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) + fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) + fmt.Fprintf(cli.out, "Name: %s\n", info.Name) + fmt.Fprintf(cli.out, "ID: %s\n", info.ID) - if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - if remoteInfo.Exists("Debug") { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) - } + if info.Debug || os.Getenv("DEBUG") != "" { + fmt.Fprintf(cli.out, "Debug mode (server): %v\n", info.Debug) fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - if remoteInfo.Exists("NFd") { - fmt.Fprintf(cli.out, "File Descriptors: %d\n", remoteInfo.GetInt("NFd")) - } - if remoteInfo.Exists("NGoroutines") { - 
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - } - if remoteInfo.Exists("SystemTime") { - t, err := remoteInfo.GetTime("SystemTime") - if err != nil { - logrus.Errorf("Error reading system time: %v", err) - } else { - fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate)) - } - } - if remoteInfo.Exists("NEventsListener") { - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - } - if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { - fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) - } - if initPath := remoteInfo.Get("InitPath"); initPath != "" { - fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) - } - if root := remoteInfo.Get("DockerRootDir"); root != "" { - fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root) - } + fmt.Fprintf(cli.out, "File Descriptors: %d\n", info.NFd) + fmt.Fprintf(cli.out, "Goroutines: %d\n", info.NGoroutines) + fmt.Fprintf(cli.out, "System Time: %s\n", info.SystemTime) + fmt.Fprintf(cli.out, "EventsListeners: %d\n", info.NEventsListener) + fmt.Fprintf(cli.out, "Init SHA1: %s\n", info.InitSha1) + fmt.Fprintf(cli.out, "Init Path: %s\n", info.InitPath) + fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) } - if remoteInfo.Exists("HttpProxy") { - fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy")) + + if info.HttpProxy != "" { + fmt.Fprintf(cli.out, "Http Proxy: %s\n", info.HttpProxy) } - if remoteInfo.Exists("HttpsProxy") { - fmt.Fprintf(cli.out, "Https Proxy: %s\n", remoteInfo.Get("HttpsProxy")) + if info.HttpsProxy != "" { + fmt.Fprintf(cli.out, "Https Proxy: %s\n", info.HttpsProxy) } - if remoteInfo.Exists("NoProxy") { - fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy")) + if info.NoProxy != "" { + fmt.Fprintf(cli.out, "No Proxy: %s\n", info.NoProxy) } - if len(remoteInfo.GetList("IndexServerAddress")) != 0 { - cli.LoadConfigFile() - u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username + + 
if info.IndexServerAddress != "" { + u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username if len(u) > 0 { fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) + fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) } } - if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") { + if !info.MemoryLimit { fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") } - if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") { + if !info.SwapLimit { fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") } - if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { + if !info.IPv4Forwarding { fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") } - if remoteInfo.Exists("Labels") { + if info.Labels != nil { fmt.Fprintln(cli.out, "Labels:") - for _, attribute := range remoteInfo.GetList("Labels") { + for _, attribute := range info.Labels { fmt.Fprintf(cli.out, " %s\n", attribute) } } diff --git a/api/client/inspect.go b/api/client/inspect.go index 0f47480b149db..0f327cb4db8d2 100644 --- a/api/client/inspect.go +++ b/api/client/inspect.go @@ -8,13 +8,14 @@ import ( "strings" "text/template" + "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/utils" ) // CmdInspect displays low-level information on one or more containers or images. // // Usage: docker inspect [OPTIONS] CONTAINER|IMAGE [CONTAINER|IMAGE...] 
+ func (cli *DockerCli) CmdInspect(args ...string) error { cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true) tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") @@ -27,19 +28,21 @@ func (cli *DockerCli) CmdInspect(args ...string) error { var err error if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) - return &utils.StatusError{StatusCode: 64, + return StatusError{StatusCode: 64, Status: "Template parsing error: " + err.Error()} } } indented := new(bytes.Buffer) - indented.WriteByte('[') + indented.WriteString("[\n") status := 0 + isImage := false for _, name := range cmd.Args() { obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, nil)) if err != nil { obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, nil)) + isImage = true if err != nil { if strings.Contains(err.Error(), "No such") { fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) @@ -58,15 +61,29 @@ func (cli *DockerCli) CmdInspect(args ...string) error { continue } } else { - // Has template, will render - var value interface{} - if err := json.Unmarshal(obj, &value); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - if err := tmpl.Execute(cli.out, value); err != nil { - return err + dec := json.NewDecoder(bytes.NewReader(obj)) + + if isImage { + inspPtr := types.ImageInspect{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + return err + } + } else { + inspPtr := types.ContainerJSON{} + if err := dec.Decode(&inspPtr); err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + status = 1 + continue + } + if err := tmpl.Execute(cli.out, inspPtr); err != nil { + return err + + } } 
cli.out.Write([]byte{'\n'}) } @@ -86,7 +103,7 @@ func (cli *DockerCli) CmdInspect(args ...string) error { } if status != 0 { - return &utils.StatusError{StatusCode: status} + return StatusError{StatusCode: status} } return nil } diff --git a/api/client/login.go b/api/client/login.go index b24ef7df7e03c..d7da1de2b04f4 100644 --- a/api/client/login.go +++ b/api/client/login.go @@ -6,11 +6,10 @@ import ( "fmt" "io" "os" - "path" "strings" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/cliconfig" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/term" "github.com/docker/docker/registry" @@ -56,10 +55,9 @@ func (cli *DockerCli) CmdLogin(args ...string) error { return string(line) } - cli.LoadConfigFile() - authconfig, ok := cli.configFile.Configs[serverAddress] + authconfig, ok := cli.configFile.AuthConfigs[serverAddress] if !ok { - authconfig = registry.AuthConfig{} + authconfig = cliconfig.AuthConfig{} } if username == "" { @@ -113,12 +111,14 @@ func (cli *DockerCli) CmdLogin(args ...string) error { authconfig.Password = password authconfig.Email = email authconfig.ServerAddress = serverAddress - cli.configFile.Configs[serverAddress] = authconfig + cli.configFile.AuthConfigs[serverAddress] = authconfig - stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], nil) + stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.AuthConfigs[serverAddress], nil) if statusCode == 401 { - delete(cli.configFile.Configs, serverAddress) - registry.SaveConfig(cli.configFile) + delete(cli.configFile.AuthConfigs, serverAddress) + if err2 := cli.configFile.Save(); err2 != nil { + fmt.Fprintf(cli.out, "WARNING: could not save config file: %v\n", err2) + } return err } if err != nil { @@ -127,12 +127,15 @@ func (cli *DockerCli) CmdLogin(args ...string) error { var response types.AuthResponse if err := json.NewDecoder(stream).Decode(&response); err != nil { - 
cli.configFile, _ = registry.LoadConfig(homedir.Get()) + // Upon error, remove entry + delete(cli.configFile.AuthConfigs, serverAddress) return err } - registry.SaveConfig(cli.configFile) - fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE)) + if err := cli.configFile.Save(); err != nil { + return fmt.Errorf("Error saving config file: %v", err) + } + fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s\n", cli.configFile.Filename()) if response.Status != "" { fmt.Fprintf(cli.out, "%s\n", response.Status) diff --git a/api/client/logout.go b/api/client/logout.go index 9282f22f0c2ac..74d0c278faae9 100644 --- a/api/client/logout.go +++ b/api/client/logout.go @@ -22,14 +22,13 @@ func (cli *DockerCli) CmdLogout(args ...string) error { serverAddress = cmd.Arg(0) } - cli.LoadConfigFile() - if _, ok := cli.configFile.Configs[serverAddress]; !ok { + if _, ok := cli.configFile.AuthConfigs[serverAddress]; !ok { fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) } else { fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) - delete(cli.configFile.Configs, serverAddress) + delete(cli.configFile.AuthConfigs, serverAddress) - if err := registry.SaveConfig(cli.configFile); err != nil { + if err := cli.configFile.Save(); err != nil { return fmt.Errorf("Failed to save docker config: %v", err) } } diff --git a/api/client/logs.go b/api/client/logs.go index 9039ecf090a20..5e5dd9dd8bcb1 100644 --- a/api/client/logs.go +++ b/api/client/logs.go @@ -1,10 +1,11 @@ package client import ( + "encoding/json" "fmt" "net/url" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" ) @@ -29,12 +30,12 @@ func (cli *DockerCli) CmdLogs(args ...string) error { return err } - env := engine.Env{} - if err := env.Decode(stream); err != nil { + var c types.ContainerJSON + if err := json.NewDecoder(stream).Decode(&c); err != nil { return err } - if 
env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" { + if c.HostConfig.LogConfig.Type != "json-file" { return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver") } @@ -51,5 +52,5 @@ func (cli *DockerCli) CmdLogs(args ...string) error { } v.Set("tail", *tail) - return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) + return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), c.Config.Tty, nil, cli.out, cli.err, nil) } diff --git a/api/client/ps.go b/api/client/ps.go index be20d7a6f6343..6c40c6867b871 100644 --- a/api/client/ps.go +++ b/api/client/ps.go @@ -15,8 +15,8 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers/filters" "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/pkg/units" - "github.com/docker/docker/utils" ) // CmdPs outputs a list of Docker containers. 
@@ -92,8 +92,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { } containers := []types.Container{} - err = json.NewDecoder(rdr).Decode(&containers) - if err != nil { + if err := json.NewDecoder(rdr).Decode(&containers); err != nil { return err } @@ -135,7 +134,7 @@ func (cli *DockerCli) CmdPs(args ...string) error { ) if !*noTrunc { - command = utils.Trunc(command, 20) + command = stringutils.Truncate(command, 20) // only display the default name for the container with notrunc is passed for _, name := range names { diff --git a/api/client/pull.go b/api/client/pull.go index a554e1f4568de..17abe4bb65b0c 100644 --- a/api/client/pull.go +++ b/api/client/pull.go @@ -42,8 +42,6 @@ func (cli *DockerCli) CmdPull(args ...string) error { return err } - cli.LoadConfigFile() - _, _, err = cli.clientRequestAttemptLogin("POST", "/images/create?"+v.Encode(), nil, cli.out, repoInfo.Index, "pull") return err } diff --git a/api/client/push.go b/api/client/push.go index a31a04ed448ba..dc4266cb757db 100644 --- a/api/client/push.go +++ b/api/client/push.go @@ -20,8 +20,6 @@ func (cli *DockerCli) CmdPush(args ...string) error { name := cmd.Arg(0) - cli.LoadConfigFile() - remote, tag := parsers.ParseRepositoryTag(name) // Resolve the Repository name from fqn to RepositoryInfo @@ -30,7 +28,7 @@ func (cli *DockerCli) CmdPush(args ...string) error { return err } // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) + authConfig := registry.ResolveAuthConfig(cli.configFile, repoInfo.Index) // If we're not using a custom registry, we know the restrictions // applied to repository names and can warn the user in advance. 
// Custom repositories can have different rules, and we must also diff --git a/api/client/rename.go b/api/client/rename.go index 278f471f2371d..ebe16963ddf19 100644 --- a/api/client/rename.go +++ b/api/client/rename.go @@ -1,20 +1,19 @@ package client -import "fmt" +import ( + "fmt" + + flag "github.com/docker/docker/pkg/mflag" +) // CmdRename renames a container. // // Usage: docker rename OLD_NAME NEW_NAME func (cli *DockerCli) CmdRename(args ...string) error { cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true) - if err := cmd.Parse(args); err != nil { - return nil - } + cmd.Require(flag.Exact, 2) + cmd.ParseFlags(args, true) - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } oldName := cmd.Arg(0) newName := cmd.Arg(1) diff --git a/api/client/restore.go b/api/client/restore.go new file mode 100644 index 0000000000000..11e3a362e3fa7 --- /dev/null +++ b/api/client/restore.go @@ -0,0 +1,48 @@ +package client + +import ( + "fmt" + + "github.com/docker/libcontainer" +) + +func (cli *DockerCli) CmdRestore(args ...string) error { + cmd := cli.Subcmd("restore", "CONTAINER [CONTAINER...]", "Restore one or more checkpointed containers", true) + + var ( + flImgDir = cmd.String([]string{"-image-dir"}, "", "(optional) directory to restore image files from") + flWorkDir = cmd.String([]string{"-work-dir"}, "", "directory to store temp files and restore.log") + flCheckTcp = cmd.Bool([]string{"-allow-tcp"}, false, "allow restoring tcp connections") + flExtUnix = cmd.Bool([]string{"-allow-ext-unix"}, false, "allow restoring external unix connections") + flShell = cmd.Bool([]string{"-allow-shell"}, false, "allow restoring shell jobs") + ) + + if err := cmd.ParseFlags(args, true); err != nil { + return err + } + + if cmd.NArg() < 1 { + cmd.Usage() + return nil + } + + criuOpts := &libcontainer.CriuOpts{ + ImagesDirectory: *flImgDir, + WorkDirectory: *flWorkDir, + TcpEstablished: *flCheckTcp, + ExternalUnixConnections: *flExtUnix, + ShellJob: *flShell, + 
} + + var encounteredError error + for _, name := range cmd.Args() { + _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restore", criuOpts, nil)) + if err != nil { + fmt.Fprintf(cli.err, "%s\n", err) + encounteredError = fmt.Errorf("Error: failed to restore one or more containers") + } else { + fmt.Fprintf(cli.out, "%s\n", name) + } + } + return encounteredError +} diff --git a/api/client/rm.go b/api/client/rm.go index 89b11825431df..1ecc0d65727c1 100644 --- a/api/client/rm.go +++ b/api/client/rm.go @@ -7,6 +7,9 @@ import ( flag "github.com/docker/docker/pkg/mflag" ) +// CmdRm removes one or more containers. +// +// Usage: docker rm [OPTIONS] CONTAINER [CONTAINER...] func (cli *DockerCli) CmdRm(args ...string) error { cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true) v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") @@ -30,6 +33,10 @@ func (cli *DockerCli) CmdRm(args ...string) error { var encounteredError error for _, name := range cmd.Args() { + if name == "" { + return fmt.Errorf("Container name cannot be empty") + } + _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, nil)) if err != nil { fmt.Fprintf(cli.err, "%s\n", err) diff --git a/api/client/rmi.go b/api/client/rmi.go index 11c9ff32d04d8..a8590dc8203fd 100644 --- a/api/client/rmi.go +++ b/api/client/rmi.go @@ -37,8 +37,7 @@ func (cli *DockerCli) CmdRmi(args ...string) error { encounteredError = fmt.Errorf("Error: failed to remove one or more images") } else { dels := []types.ImageDelete{} - err = json.NewDecoder(rdr).Decode(&dels) - if err != nil { + if err := json.NewDecoder(rdr).Decode(&dels); err != nil { fmt.Fprintf(cli.err, "%s\n", err) encounteredError = fmt.Errorf("Error: failed to remove one or more images") continue diff --git a/api/client/run.go b/api/client/run.go index 474c88f98150e..628e725f1bfba 100644 --- a/api/client/run.go +++ b/api/client/run.go @@ -12,7 
+12,6 @@ import ( "github.com/docker/docker/pkg/resolvconf" "github.com/docker/docker/pkg/signal" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) func (cid *cidFile) Close() error { @@ -44,10 +43,10 @@ func (cli *DockerCli) CmdRun(args ...string) error { // These are flags not stored in Config/HostConfig var ( - flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits") + flAutoRemove = cmd.Bool([]string{"-rm"}, false, "Automatically remove the container when it exits") flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") - flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process") - flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") + flSigProxy = cmd.Bool([]string{"-sig-proxy"}, true, "Proxy received signals to the process") + flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") flAttach *opts.ListOpts ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") @@ -242,7 +241,7 @@ func (cli *DockerCli) CmdRun(args ...string) error { } } if status != 0 { - return &utils.StatusError{StatusCode: status} + return StatusError{StatusCode: status} } return nil } diff --git a/api/client/search.go b/api/client/search.go index 8f4eb0b301223..e606d479f14d2 100644 --- a/api/client/search.go +++ b/api/client/search.go @@ -10,10 +10,11 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/docker/registry" - "github.com/docker/docker/utils" ) +// ByStars sorts search results in ascending order by number of stars. 
type ByStars []registry.SearchResult func (r ByStars) Len() int { return len(r) } @@ -44,16 +45,13 @@ func (cli *DockerCli) CmdSearch(args ...string) error { return err } - cli.LoadConfigFile() - rdr, _, err := cli.clientRequestAttemptLogin("GET", "/images/search?"+v.Encode(), nil, nil, repoInfo.Index, "search") if err != nil { return err } results := ByStars{} - err = json.NewDecoder(rdr).Decode(&results) - if err != nil { + if err := json.NewDecoder(rdr).Decode(&results); err != nil { return err } @@ -68,7 +66,7 @@ func (cli *DockerCli) CmdSearch(args ...string) error { desc := strings.Replace(res.Description, "\n", " ", -1) desc = strings.Replace(desc, "\r", " ", -1) if !*noTrunc && len(desc) > 45 { - desc = utils.Trunc(desc, 42) + "..." + desc = stringutils.Truncate(desc, 42) + "..." } fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) if res.IsOfficial { diff --git a/api/client/start.go b/api/client/start.go index 66aa5150db7de..d3dec9489d14a 100644 --- a/api/client/start.go +++ b/api/client/start.go @@ -11,7 +11,6 @@ import ( flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/utils" ) func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { @@ -156,7 +155,7 @@ func (cli *DockerCli) CmdStart(args ...string) error { return err } if status != 0 { - return &utils.StatusError{StatusCode: status} + return StatusError{StatusCode: status} } } return nil diff --git a/api/client/stats.go b/api/client/stats.go index bf9d3a814538e..b2dd36d683482 100644 --- a/api/client/stats.go +++ b/api/client/stats.go @@ -99,9 +99,9 @@ func (s *containerStats) Display(w io.Writer) error { fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", s.Name, s.CPUPercentage, - units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit), + units.HumanSize(s.Memory), units.HumanSize(s.MemoryLimit), s.MemoryPercentage, - units.BytesSize(s.NetworkRx), units.BytesSize(s.NetworkTx)) + 
units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx)) return nil } @@ -145,7 +145,7 @@ func (cli *DockerCli) CmdStats(args ...string) error { if len(errs) > 0 { return fmt.Errorf("%s", strings.Join(errs, ", ")) } - for _ = range time.Tick(500 * time.Millisecond) { + for range time.Tick(500 * time.Millisecond) { printHeader() toRemove := []int{} for i, s := range cStats { diff --git a/api/client/stats_unit_test.go b/api/client/stats_unit_test.go new file mode 100644 index 0000000000000..0831dbcbbe9dc --- /dev/null +++ b/api/client/stats_unit_test.go @@ -0,0 +1,29 @@ +package client + +import ( + "bytes" + "sync" + "testing" +) + +func TestDisplay(t *testing.T) { + c := &containerStats{ + Name: "app", + CPUPercentage: 30.0, + Memory: 100 * 1024 * 1024.0, + MemoryLimit: 2048 * 1024 * 1024.0, + MemoryPercentage: 100.0 / 2048.0 * 100.0, + NetworkRx: 100 * 1024 * 1024, + NetworkTx: 800 * 1024 * 1024, + mu: sync.RWMutex{}, + } + var b bytes.Buffer + if err := c.Display(&b); err != nil { + t.Fatalf("c.Display() gave error: %s", err) + } + got := b.String() + want := "app\t30.00%\t104.9 MB/2.147 GB\t4.88%\t104.9 MB/838.9 MB\n" + if got != want { + t.Fatalf("c.Display() = %q, want %q", got, want) + } +} diff --git a/api/client/top.go b/api/client/top.go index 9de04cac68ef3..ee16fdbf605ae 100644 --- a/api/client/top.go +++ b/api/client/top.go @@ -1,12 +1,13 @@ package client import ( + "encoding/json" "fmt" "net/url" "strings" "text/tabwriter" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" flag "github.com/docker/docker/pkg/mflag" ) @@ -28,17 +29,16 @@ func (cli *DockerCli) CmdTop(args ...string) error { if err != nil { return err } - var procs engine.Env - if err := procs.Decode(stream); err != nil { + + procList := types.ContainerProcessList{} + if err := json.NewDecoder(stream).Decode(&procList); err != nil { return err } + w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) - 
processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - return err - } - for _, proc := range processes { + fmt.Fprintln(w, strings.Join(procList.Titles, "\t")) + + for _, proc := range procList.Processes { fmt.Fprintln(w, strings.Join(proc, "\t")) } w.Flush() diff --git a/api/client/utils.go b/api/client/utils.go index cf11fefe5bba1..7a52ad25f47aa 100644 --- a/api/client/utils.go +++ b/api/client/utils.go @@ -19,7 +19,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" + "github.com/docker/docker/api/types" "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/signal" @@ -29,9 +31,10 @@ import ( ) var ( - ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") + errConnectionRefused = errors.New("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") ) +// HTTPClient creates a new HTTP client with the cli's client transport instance.
func (cli *DockerCli) HTTPClient() *http.Client { return &http.Client{Transport: cli.transport} } @@ -65,6 +68,13 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m if err != nil { return nil, "", -1, err } + + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.configFile.HttpHeaders { + req.Header.Set(k, v) + } + req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) req.URL.Host = cli.addr req.URL.Scheme = cli.scheme @@ -86,7 +96,7 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m } if err != nil { if strings.Contains(err.Error(), "connection refused") { - return nil, "", statusCode, ErrConnectionRefused + return nil, "", statusCode, errConnectionRefused } if cli.tlsConfig == nil { @@ -110,7 +120,7 @@ func (cli *DockerCli) clientRequest(method, path string, in io.Reader, headers m } func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reader, out io.Writer, index *registry.IndexInfo, cmdName string) (io.ReadCloser, int, error) { - cmdAttempt := func(authConfig registry.AuthConfig) (io.ReadCloser, int, error) { + cmdAttempt := func(authConfig cliconfig.AuthConfig) (io.ReadCloser, int, error) { buf, err := json.Marshal(authConfig) if err != nil { return nil, -1, err @@ -141,14 +151,14 @@ func (cli *DockerCli) clientRequestAttemptLogin(method, path string, in io.Reade } // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(index) + authConfig := registry.ResolveAuthConfig(cli.configFile, index) body, statusCode, err := cmdAttempt(authConfig) if statusCode == http.StatusUnauthorized { fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) if err = cli.CmdLogin(index.GetAuthConfigKey()); err != nil { return nil, -1, err } - authConfig = cli.configFile.ResolveAuthConfig(index) + authConfig = 
registry.ResolveAuthConfig(cli.configFile, index) return cmdAttempt(authConfig) } return body, statusCode, err @@ -230,11 +240,12 @@ func waitForExit(cli *DockerCli, containerID string) (int, error) { return -1, err } - var out engine.Env - if err := out.Decode(stream); err != nil { + var res types.ContainerWaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { return -1, err } - return out.GetInt("StatusCode"), nil + + return res.StatusCode, nil } // getExitCode perform an inspect on the container. It returns @@ -243,19 +254,18 @@ func getExitCode(cli *DockerCli, containerID string) (bool, int, error) { stream, _, err := cli.call("GET", "/containers/"+containerID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died. - if err != ErrConnectionRefused { + if err != errConnectionRefused { return false, -1, err } return false, -1, nil } - var result engine.Env - if err := result.Decode(stream); err != nil { + var c types.ContainerJSON + if err := json.NewDecoder(stream).Decode(&c); err != nil { return false, -1, err } - state := result.GetSubEnv("State") - return state.GetBool("Running"), state.GetInt("ExitCode"), nil + return c.State.Running, c.State.ExitCode, nil } // getExecExitCode perform an inspect on the exec command. It returns @@ -264,18 +274,24 @@ func getExecExitCode(cli *DockerCli, execID string) (bool, int, error) { stream, _, err := cli.call("GET", "/exec/"+execID+"/json", nil, nil) if err != nil { // If we can't connect, then the daemon probably died. - if err != ErrConnectionRefused { + if err != errConnectionRefused { return false, -1, err } return false, -1, nil } - var result engine.Env - if err := result.Decode(stream); err != nil { + //TODO: Should we reconsider having a type in api/types? 
+ //this is a response to exec/id/json not container + var c struct { + Running bool + ExitCode int + } + + if err := json.NewDecoder(stream).Decode(&c); err != nil { return false, -1, err } - return result.GetBool("Running"), result.GetInt("ExitCode"), nil + return c.Running, c.ExitCode, nil } func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { @@ -299,7 +315,7 @@ func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { sigchan := make(chan os.Signal, 1) gosignal.Notify(sigchan, signal.SIGWINCH) go func() { - for _ = range sigchan { + for range sigchan { cli.resizeTty(id, isExec) } }() diff --git a/api/common.go b/api/common.go index 693df38876b46..4a9523cd45c97 100644 --- a/api/common.go +++ b/api/common.go @@ -10,27 +10,16 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/version" "github.com/docker/libtrust" ) // Common constants for daemon and client. const ( - APIVERSION version.Version = "1.19" // Current REST API version - DEFAULTHTTPHOST = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g.
docker -d -H tcp://:8080 - DEFAULTUNIXSOCKET = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket - DefaultDockerfileName string = "Dockerfile" // Default filename with Docker commands, read by docker build + APIVERSION version.Version = "1.19" // Current REST API version + DefaultDockerfileName string = "Dockerfile" // Default filename with Docker commands, read by docker build ) -func ValidateHost(val string) (string, error) { - host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) - if err != nil { - return val, err - } - return host, nil -} - type ByPrivatePort []types.Port func (r ByPrivatePort) Len() int { return len(r) } @@ -118,8 +107,7 @@ func MatchesContentType(contentType, expectedType string) bool { // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700) - if err != nil { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { return nil, err } trustKey, err := libtrust.LoadKeyFile(trustKeyPath) diff --git a/api/server/form.go b/api/server/form.go new file mode 100644 index 0000000000000..af1cd2075e738 --- /dev/null +++ b/api/server/form.go @@ -0,0 +1,20 @@ +package server + +import ( + "net/http" + "strconv" + "strings" +) + +func boolValue(r *http.Request, k string) bool { + s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) + return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") +} + +func int64Value(r *http.Request, k string) int64 { + val, err := strconv.ParseInt(r.FormValue(k), 10, 64) + if err != nil { + return 0 + } + return val +} diff --git a/api/server/form_test.go b/api/server/form_test.go new file mode 100644 index 0000000000000..5cf6c82c14dfd --- /dev/null +++ b/api/server/form_test.go @@ -0,0 +1,55 @@ +package server + +import ( + "net/http" + "net/url" + 
"testing" +) + +func TestBoolValue(t *testing.T) { + cases := map[string]bool{ + "": false, + "0": false, + "no": false, + "false": false, + "none": false, + "1": true, + "yes": true, + "true": true, + "one": true, + "100": true, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := boolValue(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} + +func TestInt64Value(t *testing.T) { + cases := map[string]int64{ + "": 0, + "asdf": 0, + "0": 0, + "1": 1, + } + + for c, e := range cases { + v := url.Values{} + v.Set("test", c) + r, _ := http.NewRequest("POST", "", nil) + r.Form = v + + a := int64Value(r, "test") + if a != e { + t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) + } + } +} diff --git a/api/server/server.go b/api/server/server.go index 97bf08bb0680a..cc2bffd992560 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -1,8 +1,7 @@ package server import ( - "bufio" - "bytes" + "runtime" "time" "encoding/base64" @@ -21,6 +20,9 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/api" "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/builder" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/networkdriver/bridge" "github.com/docker/docker/engine" @@ -28,16 +30,97 @@ import ( "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/parsers/filters" + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/signal" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/version" - "github.com/docker/docker/registry" + "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" + "github.com/docker/libcontainer" ) -var ( - activationLock = 
make(chan struct{}) -) +type ServerConfig struct { + Logging bool + EnableCors bool + CorsHeaders string + Version string + SocketGroup string + Tls bool + TlsVerify bool + TlsCa string + TlsCert string + TlsKey string +} + +type Server struct { + daemon *daemon.Daemon + cfg *ServerConfig + router *mux.Router + start chan struct{} + + // TODO: delete engine + eng *engine.Engine +} + +func New(cfg *ServerConfig, eng *engine.Engine) *Server { + srv := &Server{ + cfg: cfg, + start: make(chan struct{}), + eng: eng, + } + r := createRouter(srv, eng) + srv.router = r + return srv +} + +func (s *Server) SetDaemon(d *daemon.Daemon) { + s.daemon = d +} + +type serverCloser interface { + Serve() error + Close() error +} + +// ServeApi loops through all of the protocols sent in to docker and spawns +// off a go routine to setup a serving http.Server for each. +func (s *Server) ServeApi(protoAddrs []string) error { + var chErrors = make(chan error, len(protoAddrs)) + + for _, protoAddr := range protoAddrs { + protoAddrParts := strings.SplitN(protoAddr, "://", 2) + if len(protoAddrParts) != 2 { + return fmt.Errorf("bad format, expected PROTO://ADDR") + } + go func(proto, addr string) { + logrus.Infof("Listening for HTTP on %s (%s)", proto, addr) + srv, err := s.newServer(proto, addr) + if err != nil { + chErrors <- err + return + } + s.eng.OnShutdown(func() { + if err := srv.Close(); err != nil { + logrus.Error(err) + } + }) + if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { + err = nil + } + chErrors <- err + }(protoAddrParts[0], protoAddrParts[1]) + } + + for i := 0; i < len(protoAddrs); i++ { + err := <-chErrors + if err != nil { + return err + } + } + + return nil +} type HttpServer struct { srv *http.Server @@ -112,37 +195,32 @@ func parseMultipartForm(r *http.Request) error { } func httpError(w http.ResponseWriter, err error) { + if err == nil || w == nil { + logrus.WithFields(logrus.Fields{"error": err, "writer": 
w}).Error("unexpected HTTP error handling") + return + } statusCode := http.StatusInternalServerError // FIXME: this is brittle and should not be necessary. // If we need to differentiate between different possible error types, we should // create appropriate error types with clearly defined meaning. errStr := strings.ToLower(err.Error()) - if strings.Contains(errStr, "no such") { - statusCode = http.StatusNotFound - } else if strings.Contains(errStr, "bad parameter") { - statusCode = http.StatusBadRequest - } else if strings.Contains(errStr, "conflict") { - statusCode = http.StatusConflict - } else if strings.Contains(errStr, "impossible") { - statusCode = http.StatusNotAcceptable - } else if strings.Contains(errStr, "wrong login/password") { - statusCode = http.StatusUnauthorized - } else if strings.Contains(errStr, "hasn't been activated") { - statusCode = http.StatusForbidden - } - - if err != nil { - logrus.Errorf("HTTP Error: statusCode=%d %v", statusCode, err) - http.Error(w, err.Error(), statusCode) + for keyword, status := range map[string]int{ + "not found": http.StatusNotFound, + "no such": http.StatusNotFound, + "bad parameter": http.StatusBadRequest, + "conflict": http.StatusConflict, + "impossible": http.StatusNotAcceptable, + "wrong login/password": http.StatusUnauthorized, + "hasn't been activated": http.StatusForbidden, + } { + if strings.Contains(errStr, keyword) { + statusCode = status + break + } } -} -// writeJSONEnv writes the engine.Env values to the http response stream as a -// json encoded body. 
-func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return v.Encode(w) + logrus.WithFields(logrus.Fields{"statusCode": statusCode, "err": err}).Error("HTTP Error") + http.Error(w, err.Error(), statusCode) } // writeJSON writes the value v to the http response stream as json with standard @@ -153,28 +231,23 @@ func writeJSON(w http.ResponseWriter, code int, v interface{}) error { return json.NewEncoder(w).Encode(v) } -func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { +func streamJSON(out *engine.Output, w http.ResponseWriter, flush bool) { w.Header().Set("Content-Type", "application/json") if flush { - job.Stdout.Add(utils.NewWriteFlusher(w)) + out.Add(utils.NewWriteFlusher(w)) } else { - job.Stdout.Add(w) + out.Add(w) } } -func getDaemon(eng *engine.Engine) *daemon.Daemon { - return eng.HackGetGlobalVar("httpapi.daemon").(*daemon.Daemon) -} - -func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *registry.AuthConfig +func (s *Server) postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + var config *cliconfig.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } - d := getDaemon(eng) - status, err := d.RegistryService.Auth(config) + status, err := s.daemon.RegistryService.Auth(config) if err != nil { return err } @@ -183,31 +256,60 @@ func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter }) } -func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Set("Content-Type", 
"application/json") - eng.ServeHTTP(w, r) - return nil + + v := &types.Version{ + Version: dockerversion.VERSION, + ApiVersion: api.APIVERSION, + GitCommit: dockerversion.GITCOMMIT, + GoVersion: runtime.Version(), + Os: runtime.GOOS, + Arch: runtime.GOARCH, + } + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + v.KernelVersion = kernelVersion.String() + } + + return writeJSON(w, http.StatusOK, v) } -func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } - job := eng.Job("kill", vars["name"]) - if sig := r.Form.Get("signal"); sig != "" { - job.Args = append(job.Args, sig) + + var sig uint64 + name := vars["name"] + + // If we have a signal, look at it. 
Otherwise, do nothing + if sigStr := vars["signal"]; sigStr != "" { + // Check if we passed the signal as a number: + // The largest legal signal is 31, so let's parse on 5 bits + sig, err := strconv.ParseUint(sigStr, 10, 5) + if err != nil { + // The signal is not a number, treat it as a string (either like + // "KILL" or like "SIGKILL") + sig = uint64(signal.SignalMap[strings.TrimPrefix(sigStr, "SIG")]) + } + + if sig == 0 { + return fmt.Errorf("Invalid signal: %s", sigStr) + } } - if err := job.Run(); err != nil { + + if err := s.daemon.ContainerKill(name, sig); err != nil { return err } + w.WriteHeader(http.StatusNoContent) return nil } -func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } @@ -216,8 +318,7 @@ func postContainersPause(eng *engine.Engine, version version.Version, w http.Res } name := vars["name"] - d := getDaemon(eng) - cont, err := d.Get(name) + cont, err := s.daemon.Get(name) if err != nil { return err } @@ -232,7 +333,7 @@ func postContainersPause(eng *engine.Engine, version version.Version, w http.Res return nil } -func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } @@ -241,8 +342,7 @@ func postContainersUnpause(eng *engine.Engine, version version.Version, w http.R } name := vars["name"] - d := getDaemon(eng) - cont, err := d.Get(name) + cont, err := s.daemon.Get(name) if err != nil { return err } @@ -257,19 +357,15 @@ func 
postContainersUnpause(eng *engine.Engine, version version.Version, w http.R return nil } -func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - job := eng.Job("export", vars["name"]) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - return err - } - return nil + + return s.daemon.ContainerExport(vars["name"], w) } -func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -278,10 +374,10 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW Filters: r.Form.Get("filters"), // FIXME this parameter could just be a match filter Filter: r.Form.Get("filter"), - All: toBool(r.Form.Get("all")), + All: boolValue(r, "all"), } - images, err := getDaemon(eng).Repositories().Images(&imagesConfig) + images, err := s.daemon.Repositories().Images(&imagesConfig) if err != nil { return err } @@ -310,22 +406,18 @@ func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseW return writeJSON(w, http.StatusOK, legacyImages) } -func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.GreaterThan("1.6") { - w.WriteHeader(http.StatusNotFound) - return fmt.Errorf("This is now implemented in the client.") +func (s *Server) getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + 
w.Header().Set("Content-Type", "application/json") + + info, err := s.daemon.SystemInfo() + if err != nil { + return err } - eng.ServeHTTP(w, r) - return nil -} -func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil + return writeJSON(w, http.StatusOK, info) } -func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -376,7 +468,7 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite return true } - d := getDaemon(eng) + d := s.daemon es := d.EventsService w.Header().Set("Content-Type", "application/json") enc := json.NewEncoder(utils.NewWriteFlusher(w)) @@ -429,32 +521,27 @@ func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWrite } } -func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("history", vars["name"]) - streamJSON(job, w, false) - - if err := job.Run(); err != nil { + name := vars["name"] + history, err := s.daemon.Repositories().History(name) + if err != nil { return err } - return nil + + return writeJSON(w, http.StatusOK, history) } -func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersChanges(eng *engine.Engine, version version.Version, w 
http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } name := vars["name"] - if name == "" { - return fmt.Errorf("Container name cannot be empty") - } - - d := getDaemon(eng) - cont, err := d.Get(name) + cont, err := s.daemon.Get(name) if err != nil { return err } @@ -467,44 +554,49 @@ func getContainersChanges(eng *engine.Engine, version version.Version, w http.Re return writeJSON(w, http.StatusOK, changes) } -func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.4") { return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") } + if vars == nil { return fmt.Errorf("Missing parameter") } + if err := parseForm(r); err != nil { return err } - job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) - streamJSON(job, w, false) - return job.Run() + procList, err := s.daemon.ContainerTop(vars["name"], r.Form.Get("ps_args")) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, procList) } -func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var err error - if err = parseForm(r); err != nil { +func (s *Server) getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if err := parseForm(r); err != nil { return err } config := &daemon.ContainersConfig{ - All: toBool(r.Form.Get("all")), - Size: toBool(r.Form.Get("size")), + All: boolValue(r, "all"), + Size: boolValue(r, "size"), Since: r.Form.Get("since"), Before: r.Form.Get("before"), Filters: r.Form.Get("filters"), } if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - 
config.Limit, err = strconv.Atoi(tmpLimit) + limit, err := strconv.Atoi(tmpLimit) if err != nil { return err } + config.Limit = limit } - containers, err := getDaemon(eng).Containers(config) + containers, err := s.daemon.Containers(config) if err != nil { return err } @@ -512,20 +604,18 @@ func getContainersJSON(eng *engine.Engine, version version.Version, w http.Respo return writeJSON(w, http.StatusOK, containers) } -func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - name := vars["name"] - job := eng.Job("container_stats", name) - streamJSON(job, w, true) - return job.Run() + + return s.daemon.ContainerStats(vars["name"], utils.NewWriteFlusher(w)) } -func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -533,47 +623,29 @@ func getContainersLogs(eng *engine.Engine, version version.Version, w http.Respo return fmt.Errorf("Missing parameter") } - var ( - inspectJob = eng.Job("container_inspect", vars["name"]) - logsJob = eng.Job("logs", vars["name"]) - c, err = inspectJob.Stdout.AddEnv() - ) - if err != nil { - return err - } - logsJob.Setenv("follow", r.Form.Get("follow")) - logsJob.Setenv("tail", r.Form.Get("tail")) - logsJob.Setenv("stdout", r.Form.Get("stdout")) - logsJob.Setenv("stderr", r.Form.Get("stderr")) - logsJob.Setenv("timestamps", r.Form.Get("timestamps")) // Validate args here, because we can't return not StatusOK 
after job.Run() call - stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") + stdout, stderr := boolValue(r, "stdout"), boolValue(r, "stderr") if !(stdout || stderr) { return fmt.Errorf("Bad parameters: you must choose at least one stream") } - if err = inspectJob.Run(); err != nil { - return err - } - var outStream, errStream io.Writer - outStream = utils.NewWriteFlusher(w) - - if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } else { - errStream = outStream + logsConfig := &daemon.ContainerLogsConfig{ + Follow: boolValue(r, "follow"), + Timestamps: boolValue(r, "timestamps"), + Tail: r.Form.Get("tail"), + UseStdout: stdout, + UseStderr: stderr, + OutStream: utils.NewWriteFlusher(w), } - logsJob.Stdout.Add(outStream) - logsJob.Stderr.Set(errStream) - if err := logsJob.Run(); err != nil { - fmt.Fprintf(outStream, "Error running logs job: %s\n", err) + if err := s.daemon.ContainerLogs(vars["name"], logsConfig); err != nil { + fmt.Fprintf(w, "Error running logs job: %s\n", err) } + return nil } -func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -581,57 +653,63 @@ func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseW return fmt.Errorf("Missing parameter") } - job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) - job.Setenv("force", r.Form.Get("force")) - if err := job.Run(); err != nil { + repo := r.Form.Get("repo") + tag := r.Form.Get("tag") + force := boolValue(r, "force") + if err := s.daemon.Repositories().Tag(repo, tag, 
vars["name"], force); err != nil { return err } w.WriteHeader(http.StatusCreated) return nil } -func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } - var ( - config engine.Env - job = eng.Job("commit", r.Form.Get("container")) - stdoutBuffer = bytes.NewBuffer(nil) - ) if err := checkForJson(r); err != nil { return err } - if err := config.Decode(r.Body); err != nil { - logrus.Errorf("%s", err) - } + cont := r.Form.Get("container") + pause := boolValue(r, "pause") if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { - job.Setenv("pause", "1") - } else { - job.Setenv("pause", r.FormValue("pause")) + pause = true + } + + c, _, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil && err != io.EOF { //Do not fail if body is empty. 
+ return err } - job.Setenv("repo", r.Form.Get("repo")) - job.Setenv("tag", r.Form.Get("tag")) - job.Setenv("author", r.Form.Get("author")) - job.Setenv("comment", r.Form.Get("comment")) - job.SetenvList("changes", r.Form["changes"]) - job.SetenvSubEnv("config", &config) + if c == nil { + c = &runconfig.Config{} + } - job.Stdout.Add(stdoutBuffer) - if err := job.Run(); err != nil { + containerCommitConfig := &daemon.ContainerCommitConfig{ + Pause: pause, + Repo: r.Form.Get("repo"), + Tag: r.Form.Get("tag"), + Author: r.Form.Get("author"), + Comment: r.Form.Get("comment"), + Changes: r.Form["changes"], + Config: c, + } + + imgID, err := builder.Commit(s.daemon, cont, containerCommitConfig) + if err != nil { return err } + return writeJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ - ID: engine.Tail(stdoutBuffer, 1), + ID: imgID, }) } // Creates an image from Pull or from Import -func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -640,18 +718,27 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon image = r.Form.Get("fromImage") repo = r.Form.Get("repo") tag = r.Form.Get("tag") - job *engine.Job ) authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := ®istry.AuthConfig{} + authConfig := &cliconfig.AuthConfig{} if authEncoded != "" { authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} + authConfig = &cliconfig.AuthConfig{} } } + + var ( + opErr error + useJSON = 
version.GreaterThan("1.0") + ) + + if useJSON { + w.Header().Set("Content-Type", "application/json") + } + if image != "" { //pull if tag == "" { image, tag = parsers.ParseRepositoryTag(image) @@ -662,42 +749,52 @@ func postImagesCreate(eng *engine.Engine, version version.Version, w http.Respon metaHeaders[k] = v } } - job = eng.Job("pull", image, tag) - job.SetenvBool("parallel", version.GreaterThan("1.3")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) + + imagePullConfig := &graph.ImagePullConfig{ + Parallel: version.GreaterThan("1.3"), + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + OutStream: utils.NewWriteFlusher(w), + Json: useJSON, + } + + opErr = s.daemon.Repositories().Pull(image, tag, imagePullConfig) } else { //import if tag == "" { repo, tag = parsers.ParseRepositoryTag(repo) } - job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) - job.Stdin.Add(r.Body) - job.SetenvList("changes", r.Form["changes"]) - } - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { + src := r.Form.Get("fromSrc") + imageImportConfig := &graph.ImageImportConfig{ + Changes: r.Form["changes"], + InConfig: r.Body, + OutStream: utils.NewWriteFlusher(w), + Json: useJSON, + } + + newConfig, err := builder.BuildFromConfig(s.daemon, &runconfig.Config{}, imageImportConfig.Changes) + if err != nil { return err } - sf := streamformatter.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) + imageImportConfig.ContainerConfig = newConfig + + opErr = s.daemon.Repositories().Import(src, repo, tag, imageImportConfig) + } + + if opErr != nil { + sf := streamformatter.NewStreamFormatter(useJSON) + return fmt.Errorf(string(sf.FormatError(opErr))) } return nil } -func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r 
*http.Request, vars map[string]string) error { +func (s *Server) getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } var ( - config *registry.AuthConfig + config *cliconfig.AuthConfig authEncoded = r.Header.Get("X-Registry-Auth") headers = map[string][]string{} ) @@ -707,7 +804,7 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons if err := json.NewDecoder(authJson).Decode(&config); err != nil { // for a search it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - config = ®istry.AuthConfig{} + config = &cliconfig.AuthConfig{} } } for k, v := range r.Header { @@ -715,15 +812,14 @@ func getImagesSearch(eng *engine.Engine, version version.Version, w http.Respons headers[k] = v } } - d := getDaemon(eng) - query, err := d.RegistryService.Search(r.Form.Get("term"), config, headers) + query, err := s.daemon.RegistryService.Search(r.Form.Get("term"), config, headers) if err != nil { return err } return json.NewEncoder(w).Encode(query.Results) } -func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } @@ -737,7 +833,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response if err := parseForm(r); err != nil { return err } - authConfig := ®istry.AuthConfig{} + authConfig := &cliconfig.AuthConfig{} authEncoded := r.Header.Get("X-Registry-Auth") if authEncoded != "" { @@ -745,7 +841,7 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response authJson := base64.NewDecoder(base64.URLEncoding, 
strings.NewReader(authEncoded)) if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // to increase compatibility to existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} + authConfig = &cliconfig.AuthConfig{} } } else { // the old format is supported for compatibility if there was no authConfig header @@ -754,55 +850,69 @@ func postImagesPush(eng *engine.Engine, version version.Version, w http.Response } } - job := eng.Job("push", vars["name"]) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - job.Setenv("tag", r.Form.Get("tag")) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) + useJSON := version.GreaterThan("1.0") + name := vars["name"] + + output := utils.NewWriteFlusher(w) + imagePushConfig := &graph.ImagePushConfig{ + MetaHeaders: metaHeaders, + AuthConfig: authConfig, + Tag: r.Form.Get("tag"), + OutStream: output, + Json: useJSON, + } + if useJSON { + w.Header().Set("Content-Type", "application/json") } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { + if err := s.daemon.Repositories().Push(name, imagePushConfig); err != nil { + if !output.Flushed() { return err } - sf := streamformatter.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) + sf := streamformatter.NewStreamFormatter(useJSON) + output.Write(sf.FormatError(err)) } return nil + } -func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } if err := parseForm(r); err != nil { return err } - if version.GreaterThan("1.0") { + + useJSON := version.GreaterThan("1.0") + if useJSON { w.Header().Set("Content-Type", 
"application/x-tar") } - var job *engine.Job + + output := utils.NewWriteFlusher(w) + imageExportConfig := &graph.ImageExportConfig{Outstream: output} if name, ok := vars["name"]; ok { - job = eng.Job("image_export", name) + imageExportConfig.Names = []string{name} } else { - job = eng.Job("image_export", r.Form["names"]...) + imageExportConfig.Names = r.Form["names"] + } + + if err := s.daemon.Repositories().ImageExport(imageExportConfig); err != nil { + if !output.Flushed() { + return err + } + sf := streamformatter.NewStreamFormatter(useJSON) + output.Write(sf.FormatError(err)) } - job.Stdout.Add(w) - return job.Run() + return nil + } -func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - job := eng.Job("load") - job.Stdin.Add(r.Body) - job.Stdout.Add(w) - return job.Run() +func (s *Server) postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + return s.daemon.Repositories().Load(r.Body, w) } -func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } @@ -810,50 +920,49 @@ func postContainersCreate(eng *engine.Engine, version version.Version, w http.Re return err } var ( - job = eng.Job("create", r.Form.Get("name")) - outWarnings []string - stdoutBuffer = bytes.NewBuffer(nil) - warnings = bytes.NewBuffer(nil) + warnings []string + name = r.Form.Get("name") ) - if err := job.DecodeEnv(r.Body); err != nil { + config, hostConfig, err := runconfig.DecodeContainerConfig(r.Body) + if err != nil { return err } - // Read container ID from the first line of stdout - job.Stdout.Add(stdoutBuffer) - // Read warnings from stderr 
- job.Stderr.Add(warnings) - if err := job.Run(); err != nil { + + containerId, warnings, err := s.daemon.ContainerCreate(name, config, hostConfig) + if err != nil { return err } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } + return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ - ID: engine.Tail(stdoutBuffer, 1), - Warnings: outWarnings, + ID: containerId, + Warnings: warnings, }) } -func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - job := eng.Job("restart", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { + + timeout, err := strconv.Atoi(r.Form.Get("t")) + if err != nil { + return err + } + + if err := s.daemon.ContainerRestart(vars["name"], timeout); err != nil { return err } + w.WriteHeader(http.StatusNoContent) + return nil } -func postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -861,17 +970,16 @@ func postContainerRename(eng *engine.Engine, version version.Version, w http.Res return fmt.Errorf("Missing parameter") } - d := getDaemon(eng) name := vars["name"] newName := r.Form.Get("name") - if err := d.ContainerRename(name, newName); err != nil { + if err := s.daemon.ContainerRename(name, newName); err != nil { return err } 
w.WriteHeader(http.StatusNoContent) return nil } -func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -880,18 +988,17 @@ func deleteContainers(eng *engine.Engine, version version.Version, w http.Respon } name := vars["name"] - if name == "" { - return fmt.Errorf("Container name cannot be empty") - } - - d := getDaemon(eng) config := &daemon.ContainerRmConfig{ - ForceRemove: toBool(r.Form.Get("force")), - RemoveVolume: toBool(r.Form.Get("v")), - RemoveLink: toBool(r.Form.Get("link")), + ForceRemove: boolValue(r, "force"), + RemoveVolume: boolValue(r, "v"), + RemoveLink: boolValue(r, "link"), } - if err := d.ContainerRm(name, config); err != nil { + if err := s.daemon.ContainerRm(name, config); err != nil { + // Force a 404 for the empty string + if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { + return fmt.Errorf("no such id: \"\"") + } return err } @@ -900,7 +1007,7 @@ func deleteContainers(eng *engine.Engine, version version.Version, w http.Respon return nil } -func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -908,12 +1015,11 @@ func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr return fmt.Errorf("Missing parameter") } - d := getDaemon(eng) name := vars["name"] - force := toBool(r.Form.Get("force")) - noprune := toBool(r.Form.Get("noprune")) + force := boolValue(r, "force") + noprune := boolValue(r, "noprune") - list, err := 
d.ImageDelete(name, force, noprune) + list, err := s.daemon.ImageDelete(name, force, noprune) if err != nil { return err } @@ -921,32 +1027,32 @@ func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWr return writeJSON(w, http.StatusOK, list) } -func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - var ( - name = vars["name"] - job = eng.Job("start", name) - ) // If contentLength is -1, we can assumed chunked encoding // or more technically that the length is unknown - // http://golang.org/src/pkg/net/http/request.go#L139 + // https://golang.org/src/pkg/net/http/request.go#L139 // net/http otherwise seems to swallow any headers related to chunked encoding // including r.TransferEncoding // allow a nil body for backwards compatibility + var hostConfig *runconfig.HostConfig if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { if err := checkForJson(r); err != nil { return err } - if err := job.DecodeEnv(r.Body); err != nil { + c, err := runconfig.DecodeHostConfig(r.Body) + if err != nil { return err } + + hostConfig = c } - if err := job.Run(); err != nil { + if err := s.daemon.ContainerStart(vars["name"], hostConfig); err != nil { if err.Error() == "Container already started" { w.WriteHeader(http.StatusNotModified) return nil @@ -957,16 +1063,20 @@ func postContainersStart(eng *engine.Engine, version version.Version, w http.Res return nil } -func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error 
{ if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - job := eng.Job("stop", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { + + seconds, err := strconv.Atoi(r.Form.Get("t")) + if err != nil { + return err + } + + if err := s.daemon.ContainerStop(vars["name"], seconds); err != nil { if err.Error() == "Container already stopped" { w.WriteHeader(http.StatusNotModified) return nil @@ -974,44 +1084,54 @@ func postContainersStop(eng *engine.Engine, version version.Version, w http.Resp return err } w.WriteHeader(http.StatusNoContent) + return nil } -func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - var ( - stdoutBuffer = bytes.NewBuffer(nil) - job = eng.Job("wait", vars["name"]) - ) - job.Stdout.Add(stdoutBuffer) - if err := job.Run(); err != nil { - return err - } - statusCode, err := strconv.Atoi(engine.Tail(stdoutBuffer, 1)) + + name := vars["name"] + cont, err := s.daemon.Get(name) if err != nil { return err } + + status, _ := cont.WaitStop(-1 * time.Second) + return writeJSON(w, http.StatusOK, &types.ContainerWaitResponse{ - StatusCode: statusCode, + StatusCode: status, }) } -func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + 
+ height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { return err } - return nil + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + cont, err := s.daemon.Get(vars["name"]) + if err != nil { + return err + } + + return cont.Resize(height, width) } -func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } @@ -1019,9 +1139,7 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re return fmt.Errorf("Missing parameter") } - d := getDaemon(eng) - - cont, err := d.Get(vars["name"]) + cont, err := s.daemon.Get(vars["name"]) if err != nil { return err } @@ -1046,19 +1164,19 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re } else { errStream = outStream } - logs := toBool(r.Form.Get("logs")) - stream := toBool(r.Form.Get("stream")) + logs := boolValue(r, "logs") + stream := boolValue(r, "stream") var stdin io.ReadCloser var stdout, stderr io.Writer - if toBool(r.Form.Get("stdin")) { + if boolValue(r, "stdin") { stdin = inStream } - if toBool(r.Form.Get("stdout")) { + if boolValue(r, "stdout") { stdout = outStream } - if toBool(r.Form.Get("stderr")) { + if boolValue(r, "stderr") { stderr = errStream } @@ -1068,16 +1186,14 @@ func postContainersAttach(eng *engine.Engine, version version.Version, w http.Re return nil } -func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return 
fmt.Errorf("Missing parameter") } - d := getDaemon(eng) - - cont, err := d.Get(vars["name"]) + cont, err := s.daemon.Get(vars["name"]) if err != nil { return err } @@ -1096,49 +1212,73 @@ func wsContainersAttach(eng *engine.Engine, version version.Version, w http.Resp return nil } -func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("container_inspect", vars["name"]) + + name := vars["name"] + if version.LessThan("1.12") { - job.SetenvBool("raw", true) + containerJSONRaw, err := s.daemon.ContainerInspectRaw(name) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSONRaw) } - streamJSON(job, w, false) - return job.Run() + containerJSON, err := s.daemon.ContainerInspect(name) + if err != nil { + return err + } + return writeJSON(w, http.StatusOK, containerJSON) } -func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter 'id'") } - var job = eng.Job("execInspect", vars["id"]) - streamJSON(job, w, false) - return job.Run() + + eConfig, err := s.daemon.ContainerExecInspect(vars["id"]) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, eConfig) } -func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error 
{ if vars == nil { return fmt.Errorf("Missing parameter") } - var job = eng.Job("image_inspect", vars["name"]) + + name := vars["name"] if version.LessThan("1.12") { - job.SetenvBool("raw", true) + imageInspectRaw, err := s.daemon.Repositories().LookupRaw(name) + if err != nil { + return err + } + + return writeJSON(w, http.StatusOK, imageInspectRaw) + } + + imageInspect, err := s.daemon.Repositories().Lookup(name) + if err != nil { + return err } - streamJSON(job, w, false) - return job.Run() + + return writeJSON(w, http.StatusOK, imageInspect) } -func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if version.LessThan("1.3") { return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") } var ( authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} + authConfig = &cliconfig.AuthConfig{} configFileEncoded = r.Header.Get("X-Registry-Config") - configFile = ®istry.ConfigFile{} - job = eng.Job("build") + configFile = &cliconfig.ConfigFile{} + buildConfig = builder.NewBuildConfig() ) // This block can be removed when API versions prior to 1.9 are deprecated. 
@@ -1150,7 +1290,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} + authConfig = &cliconfig.AuthConfig{} } } @@ -1159,40 +1299,44 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { // for a pull it is not an error if no auth was given // to increase compatibility with the existing api it is defaulting to be empty - configFile = ®istry.ConfigFile{} + configFile = &cliconfig.ConfigFile{} } } if version.GreaterThanOrEqualTo("1.8") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) + w.Header().Set("Content-Type", "application/json") + buildConfig.JSONFormat = true } - if toBool(r.FormValue("forcerm")) && version.GreaterThanOrEqualTo("1.12") { - job.Setenv("rm", "1") + if boolValue(r, "forcerm") && version.GreaterThanOrEqualTo("1.12") { + buildConfig.Remove = true } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { - job.Setenv("rm", "1") + buildConfig.Remove = true } else { - job.Setenv("rm", r.FormValue("rm")) - } - if toBool(r.FormValue("pull")) && version.GreaterThanOrEqualTo("1.16") { - job.Setenv("pull", "1") - } - job.Stdin.Add(r.Body) - job.Setenv("remote", r.FormValue("remote")) - job.Setenv("dockerfile", r.FormValue("dockerfile")) - job.Setenv("t", r.FormValue("t")) - job.Setenv("q", r.FormValue("q")) - job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("forcerm", r.FormValue("forcerm")) - job.SetenvJson("authConfig", authConfig) - job.SetenvJson("configFile", configFile) - job.Setenv("memswap", r.FormValue("memswap")) - job.Setenv("memory", r.FormValue("memory")) - job.Setenv("cpusetcpus", 
r.FormValue("cpusetcpus")) - job.Setenv("cpushares", r.FormValue("cpushares")) + buildConfig.Remove = boolValue(r, "rm") + } + if boolValue(r, "pull") && version.GreaterThanOrEqualTo("1.16") { + buildConfig.Pull = true + } + + output := utils.NewWriteFlusher(w) + buildConfig.Stdout = output + buildConfig.Context = r.Body + + buildConfig.RemoteURL = r.FormValue("remote") + buildConfig.DockerfileName = r.FormValue("dockerfile") + buildConfig.RepoName = r.FormValue("t") + buildConfig.SuppressOutput = boolValue(r, "q") + buildConfig.NoCache = boolValue(r, "nocache") + buildConfig.ForceRemove = boolValue(r, "forcerm") + buildConfig.AuthConfig = authConfig + buildConfig.ConfigFile = configFile + buildConfig.MemorySwap = int64Value(r, "memswap") + buildConfig.Memory = int64Value(r, "memory") + buildConfig.CpuShares = int64Value(r, "cpushares") + buildConfig.CpuQuota = int64Value(r, "cpuquota") + buildConfig.CpuSetCpus = r.FormValue("cpusetcpus") + buildConfig.CpuSetMems = r.FormValue("cpusetmems") // Job cancellation. Note: not all job types support this. if closeNotifier, ok := w.(http.CloseNotifier); ok { @@ -1202,14 +1346,16 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite select { case <-finished: case <-closeNotifier.CloseNotify(): - logrus.Infof("Client disconnected, cancelling job: %s", job.Name) - job.Cancel() + logrus.Infof("Client disconnected, cancelling job: build") + buildConfig.Cancel() } }() } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { + if err := builder.Build(s.daemon, buildConfig); err != nil { + // Do not write the error in the http output if it's still empty. + // This prevents writing a 200(OK) when there is an internal error. 
+ if !output.Flushed() { return err } sf := streamformatter.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) @@ -1218,7 +1364,7 @@ func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWrite return nil } -func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if vars == nil { return fmt.Errorf("Missing parameter") } @@ -1242,7 +1388,7 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp res = res[1:] } - cont, err := getDaemon(eng).Get(vars["name"]) + cont, err := s.daemon.Get(vars["name"]) if err != nil { logrus.Errorf("%v", err) if strings.Contains(strings.ToLower(err.Error()), "no such id") { @@ -1268,57 +1414,94 @@ func postContainersCopy(eng *engine.Engine, version version.Version, w http.Resp return nil } -func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainersCheckpoint(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return err + } + + criuOpts := &libcontainer.CriuOpts{} + if err := json.NewDecoder(r.Body).Decode(criuOpts); err != nil { + return err + } + + if err := s.daemon.ContainerCheckpoint(vars["name"], criuOpts); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainersRestore(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { + if vars == nil { + return fmt.Errorf("Missing parameter") + } + if err := parseForm(r); err != nil { + return 
err + } + + restoreOpts := &libcontainer.CriuOpts{} + if err := json.NewDecoder(r.Body).Decode(restoreOpts); err != nil { + return err + } + + if err := s.daemon.ContainerRestore(vars["name"], restoreOpts); err != nil { + return err + } + + w.WriteHeader(http.StatusNoContent) + return nil +} + +func (s *Server) postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } - var ( - name = vars["name"] - job = eng.Job("execCreate", name) - stdoutBuffer = bytes.NewBuffer(nil) - outWarnings []string - warnings = bytes.NewBuffer(nil) - ) + name := vars["name"] - if err := job.DecodeEnv(r.Body); err != nil { + execConfig := &runconfig.ExecConfig{} + if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { return err } + execConfig.Container = name + + if len(execConfig.Cmd) == 0 { + return fmt.Errorf("No exec command specified") + } - job.Stdout.Add(stdoutBuffer) - // Read warnings from stderr - job.Stderr.Add(warnings) // Register an instance of Exec in container. - if err := job.Run(); err != nil { - fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) + id, err := s.daemon.ContainerExecCreate(execConfig) + if err != nil { + logrus.Errorf("Error setting up exec command in container %s: %s", name, err) return err } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: engine.Tail(stdoutBuffer, 1), - Warnings: outWarnings, + ID: id, }) } // TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. 
-func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return nil } var ( - name = vars["name"] - job = eng.Job("execStart", name) - errOut io.Writer = os.Stderr + execName = vars["name"] + stdin io.ReadCloser + stdout io.Writer + stderr io.Writer ) - if err := job.DecodeEnv(r.Body); err != nil { + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { return err } - if !job.GetenvBool("Detach") { + + if !execStartCheck.Detach { // Setting up the streaming http interface. inStream, outStream, err := hijackServer(w) if err != nil { @@ -1334,21 +1517,20 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http. fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") } - if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { + if !execStartCheck.Tty && version.GreaterThanOrEqualTo("1.6") { errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) } else { errStream = outStream } - job.Stdin.Add(inStream) - job.Stdout.Add(outStream) - job.Stderr.Set(errStream) - errOut = outStream + stdin = inStream + stdout = outStream + stderr = errStream } // Now run the user process in container. 
- job.SetCloseIO(false) - if err := job.Run(); err != nil { - fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) + + if err := s.daemon.ContainerExecStart(execName, stdin, stdout, stderr); err != nil { + logrus.Errorf("Error starting exec command in container %s: %s", execName, err) return err } w.WriteHeader(http.StatusNoContent) @@ -1356,20 +1538,27 @@ func postContainerExecStart(eng *engine.Engine, version version.Version, w http. return nil } -func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := parseForm(r); err != nil { return err } if vars == nil { return fmt.Errorf("Missing parameter") } - if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { + + height, err := strconv.Atoi(r.Form.Get("h")) + if err != nil { return err } - return nil + width, err := strconv.Atoi(r.Form.Get("w")) + if err != nil { + return err + } + + return s.daemon.ContainerExecResize(vars["name"], height, width) } -func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } @@ -1380,7 +1569,7 @@ func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") } -func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { +func (s *Server) ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, 
vars map[string]string) error { _, err := w.Write([]byte{'O', 'K'}) return err } @@ -1421,71 +1610,73 @@ func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, local } // we keep enableCors just for legacy usage, need to be removed in the future -func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders string, dockerVersion string) *mux.Router { +func createRouter(s *Server, eng *engine.Engine) *mux.Router { r := mux.NewRouter() if os.Getenv("DEBUG") != "" { ProfilerSetup(r, "/debug/") } m := map[string]map[string]HttpApiFunc{ "GET": { - "/_ping": ping, - "/events": getEvents, - "/info": getInfo, - "/version": getVersion, - "/images/json": getImagesJSON, - "/images/viz": getImagesViz, - "/images/search": getImagesSearch, - "/images/get": getImagesGet, - "/images/{name:.*}/get": getImagesGet, - "/images/{name:.*}/history": getImagesHistory, - "/images/{name:.*}/json": getImagesByName, - "/containers/ps": getContainersJSON, - "/containers/json": getContainersJSON, - "/containers/{name:.*}/export": getContainersExport, - "/containers/{name:.*}/changes": getContainersChanges, - "/containers/{name:.*}/json": getContainersByName, - "/containers/{name:.*}/top": getContainersTop, - "/containers/{name:.*}/logs": getContainersLogs, - "/containers/{name:.*}/stats": getContainersStats, - "/containers/{name:.*}/attach/ws": wsContainersAttach, - "/exec/{id:.*}/json": getExecByID, + "/_ping": s.ping, + "/events": s.getEvents, + "/info": s.getInfo, + "/version": s.getVersion, + "/images/json": s.getImagesJSON, + "/images/search": s.getImagesSearch, + "/images/get": s.getImagesGet, + "/images/{name:.*}/get": s.getImagesGet, + "/images/{name:.*}/history": s.getImagesHistory, + "/images/{name:.*}/json": s.getImagesByName, + "/containers/ps": s.getContainersJSON, + "/containers/json": s.getContainersJSON, + "/containers/{name:.*}/export": s.getContainersExport, + "/containers/{name:.*}/changes": s.getContainersChanges, + 
"/containers/{name:.*}/json": s.getContainersByName, + "/containers/{name:.*}/top": s.getContainersTop, + "/containers/{name:.*}/logs": s.getContainersLogs, + "/containers/{name:.*}/stats": s.getContainersStats, + "/containers/{name:.*}/attach/ws": s.wsContainersAttach, + "/exec/{id:.*}/json": s.getExecByID, }, "POST": { - "/auth": postAuth, - "/commit": postCommit, - "/build": postBuild, - "/images/create": postImagesCreate, - "/images/load": postImagesLoad, - "/images/{name:.*}/push": postImagesPush, - "/images/{name:.*}/tag": postImagesTag, - "/containers/create": postContainersCreate, - "/containers/{name:.*}/kill": postContainersKill, - "/containers/{name:.*}/pause": postContainersPause, - "/containers/{name:.*}/unpause": postContainersUnpause, - "/containers/{name:.*}/restart": postContainersRestart, - "/containers/{name:.*}/start": postContainersStart, - "/containers/{name:.*}/stop": postContainersStop, - "/containers/{name:.*}/wait": postContainersWait, - "/containers/{name:.*}/resize": postContainersResize, - "/containers/{name:.*}/attach": postContainersAttach, - "/containers/{name:.*}/copy": postContainersCopy, - "/containers/{name:.*}/exec": postContainerExecCreate, - "/exec/{name:.*}/start": postContainerExecStart, - "/exec/{name:.*}/resize": postContainerExecResize, - "/containers/{name:.*}/rename": postContainerRename, + "/auth": s.postAuth, + "/commit": s.postCommit, + "/build": s.postBuild, + "/images/create": s.postImagesCreate, + "/images/load": s.postImagesLoad, + "/images/{name:.*}/push": s.postImagesPush, + "/images/{name:.*}/tag": s.postImagesTag, + "/containers/create": s.postContainersCreate, + "/containers/{name:.*}/kill": s.postContainersKill, + "/containers/{name:.*}/pause": s.postContainersPause, + "/containers/{name:.*}/unpause": s.postContainersUnpause, + "/containers/{name:.*}/restart": s.postContainersRestart, + "/containers/{name:.*}/start": s.postContainersStart, + "/containers/{name:.*}/stop": s.postContainersStop, + 
"/containers/{name:.*}/wait": s.postContainersWait, + "/containers/{name:.*}/resize": s.postContainersResize, + "/containers/{name:.*}/attach": s.postContainersAttach, + "/containers/{name:.*}/copy": s.postContainersCopy, + "/containers/{name:.*}/exec": s.postContainerExecCreate, + "/exec/{name:.*}/start": s.postContainerExecStart, + "/exec/{name:.*}/resize": s.postContainerExecResize, + "/containers/{name:.*}/rename": s.postContainerRename, + "/containers/{name:.*}/checkpoint": s.postContainersCheckpoint, + "/containers/{name:.*}/restore": s.postContainersRestore, }, "DELETE": { - "/containers/{name:.*}": deleteContainers, - "/images/{name:.*}": deleteImages, + "/containers/{name:.*}": s.deleteContainers, + "/images/{name:.*}": s.deleteImages, }, "OPTIONS": { - "": optionsHandler, + "": s.optionsHandler, }, } // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" // otherwise, all head values will be passed to HTTP handler - if corsHeaders == "" && enableCors { + corsHeaders := s.cfg.CorsHeaders + if corsHeaders == "" && s.cfg.EnableCors { corsHeaders = "*" } @@ -1498,7 +1689,7 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders stri localMethod := method // build the handler function - f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, corsHeaders, version.Version(dockerVersion)) + f := makeHttpHandler(eng, s.cfg.Logging, localMethod, localRoute, localFct, corsHeaders, version.Version(s.cfg.Version)) // add the new route if localRoute == "" { @@ -1517,7 +1708,14 @@ func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders stri // FIXME: refactor this to be part of Server and not require re-creating a new // router each time. This requires first moving ListenAndServe into Server. 
func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) { - router := createRouter(eng, false, true, "", "") + cfg := &ServerConfig{ + EnableCors: true, + Version: string(apiversion), + } + api := New(cfg, eng) + daemon, _ := eng.HackGetGlobalVar("httpapi.daemon").(*daemon.Daemon) + api.AcceptConnections(daemon) + router := createRouter(api, eng) // Insert APIVERSION into the request as a convenience req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) router.ServeHTTP(w, req) @@ -1548,58 +1746,3 @@ func allocateDaemonPort(addr string) error { } return nil } - -type Server interface { - Serve() error - Close() error -} - -// ServeApi loops through all of the protocols sent in to docker and spawns -// off a go routine to setup a serving http.Server for each. -func ServeApi(job *engine.Job) error { - if len(job.Args) == 0 { - return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) - } - var ( - protoAddrs = job.Args - chErrors = make(chan error, len(protoAddrs)) - ) - - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return fmt.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) - } - go func() { - logrus.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job) - if err != nil { - chErrors <- err - return - } - job.Eng.OnShutdown(func() { - if err := srv.Close(); err != nil { - logrus.Error(err) - } - }) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }() - } - - for i := 0; i < len(protoAddrs); i++ { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -func toBool(s string) bool { - s = strings.ToLower(strings.TrimSpace(s)) - return !(s == "" || s == "0" || s == "no" || s == "false" || s == 
"none") -} diff --git a/api/server/server_linux.go b/api/server/server_linux.go index 37ba7ed80dec2..43f0eefe0ecfa 100644 --- a/api/server/server_linux.go +++ b/api/server/server_linux.go @@ -8,22 +8,15 @@ import ( "net/http" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" + "github.com/docker/docker/daemon" "github.com/docker/docker/pkg/systemd" ) -// NewServer sets up the required Server and does protocol specific checking. -func NewServer(proto, addr string, job *engine.Job) (Server, error) { +// newServer sets up the required serverCloser and does protocol specific checking. +func (s *Server) newServer(proto, addr string) (serverCloser, error) { var ( err error l net.Listener - r = createRouter( - job.Eng, - job.GetenvBool("Logging"), - job.GetenvBool("EnableCors"), - job.Getenv("CorsHeaders"), - job.Getenv("Version"), - ) ) switch proto { case "fd": @@ -35,13 +28,13 @@ func NewServer(proto, addr string, job *engine.Job) (Server, error) { // We don't want to start serving on these sockets until the // daemon is initialized and installed. Otherwise required handlers // won't be ready. 
- <-activationLock + <-s.start // Since ListenFD will return one or more sockets we have // to create a go func to spawn off multiple serves for i := range ls { listener := ls[i] go func() { - httpSrv := http.Server{Handler: r} + httpSrv := http.Server{Handler: s.router} chErrors <- httpSrv.Serve(listener) }() } @@ -52,17 +45,17 @@ func NewServer(proto, addr string, job *engine.Job) (Server, error) { } return nil, nil case "tcp": - if !job.GetenvBool("TlsVerify") { - logrus.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + if !s.cfg.TlsVerify { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } - if l, err = NewTcpSocket(addr, tlsConfigFromJob(job)); err != nil { + if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg), s.start); err != nil { return nil, err } if err := allocateDaemonPort(addr); err != nil { return nil, err } case "unix": - if l, err = NewUnixSocket(addr, job.Getenv("SocketGroup")); err != nil { + if l, err = NewUnixSocket(addr, s.cfg.SocketGroup, s.start); err != nil { return nil, err } default: @@ -71,21 +64,20 @@ func NewServer(proto, addr string, job *engine.Job) (Server, error) { return &HttpServer{ &http.Server{ Addr: addr, - Handler: r, + Handler: s.router, }, l, }, nil } -// Called through eng.Job("acceptconnections") -func AcceptConnections(job *engine.Job) error { +func (s *Server) AcceptConnections(d *daemon.Daemon) { // Tell the init daemon we are accepting requests + s.daemon = d go systemd.SdNotify("READY=1") // close the lock so the listeners start accepting connections select { - case <-activationLock: + case <-s.start: default: - close(activationLock) + close(s.start) } - return nil } diff --git a/api/server/server_unit_test.go b/api/server/server_unit_test.go deleted file mode 100644 index 88dadab6f544b..0000000000000 --- a/api/server/server_unit_test.go +++ /dev/null @@ -1,365 +0,0 @@ 
-package server - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/version" -) - -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} - -func TestGetVersion(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("version", func(job *engine.Job) error { - called = true - v := &engine.Env{} - v.SetJson("Version", "42.1") - v.Set("ApiVersion", "1.1.1.1.1") - v.Set("GoVersion", "2.42") - v.Set("Os", "Linux") - v.Set("Arch", "x86_64") - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil - }) - r := serveRequest("GET", "/version", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - v := readEnv(r.Body, t) - if v.Get("Version") != "42.1" { - t.Fatalf("%#v\n", v) - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } -} - -func TestGetInfo(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("info", func(job *engine.Job) error { - called = true - v := &engine.Env{} - v.SetInt("Containers", 1) - v.SetInt("Images", 42000) - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil - }) - r := serveRequest("GET", "/info", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - v := readEnv(r.Body, t) - if v.GetInt("Images") 
!= 42000 { - t.Fatalf("%#v\n", v) - } - if v.GetInt("Containers") != 1 { - t.Fatalf("%#v\n", v) - } - assertContentType(r, "application/json", t) -} - -func TestGetContainersByName(t *testing.T) { - eng := engine.New() - name := "container_name" - var called bool - eng.Register("container_inspect", func(job *engine.Job) error { - called = true - if job.Args[0] != name { - t.Errorf("name != '%s': %#v", name, job.Args[0]) - } - if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { - t.Errorf("dirty env variable not set") - } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { - t.Errorf("dirty env variable set when it shouldn't") - } - v := &engine.Env{} - v.SetBool("dirty", true) - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil - }) - r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - assertContentType(r, "application/json", t) - var stdoutJson interface{} - if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { - t.Fatalf("%#v", err) - } - if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { - t.Fatalf("%#v", stdoutJson) - } -} - -func TestLogs(t *testing.T) { - eng := engine.New() - var inspect bool - var logs bool - eng.Register("container_inspect", func(job *engine.Job) error { - inspect = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - return nil - }) - expected := "logs" - eng.Register("logs", func(job *engine.Job) error { - logs = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - follow := job.Getenv("follow") - if follow != "1" { - t.Fatalf("follow: %s, must be 1", follow) - } - stdout := job.Getenv("stdout") - if stdout != "1" { - t.Fatalf("stdout %s, must be 1", 
stdout) - } - stderr := job.Getenv("stderr") - if stderr != "" { - t.Fatalf("stderr %s, must be empty", stderr) - } - timestamps := job.Getenv("timestamps") - if timestamps != "1" { - t.Fatalf("timestamps %s, must be 1", timestamps) - } - job.Stdout.Write([]byte(expected)) - return nil - }) - r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1×tamps=1", nil, eng, t) - if r.Code != http.StatusOK { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) - } - if !inspect { - t.Fatal("container_inspect job was not called") - } - if !logs { - t.Fatal("logs job was not called") - } - res := r.Body.String() - if res != expected { - t.Fatalf("Output %s, expected %s", res, expected) - } -} - -func TestLogsNoStreams(t *testing.T) { - eng := engine.New() - var inspect bool - var logs bool - eng.Register("container_inspect", func(job *engine.Job) error { - inspect = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - return nil - }) - eng.Register("logs", func(job *engine.Job) error { - logs = true - return nil - }) - r := serveRequest("GET", "/containers/test/logs", nil, eng, t) - if r.Code != http.StatusBadRequest { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) - } - if inspect { - t.Fatal("container_inspect job was called, but it shouldn't") - } - if logs { - t.Fatal("logs job was called, but it shouldn't") - } - res := strings.TrimSpace(r.Body.String()) - expected := "Bad parameters: you must choose at least one stream" - if !strings.Contains(res, expected) { - t.Fatalf("Output %s, expected %s in it", res, expected) - } -} - -func TestGetImagesHistory(t *testing.T) { - eng := engine.New() - imageName := "docker-test-image" - var called bool - eng.Register("history", func(job *engine.Job) error { - called = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != imageName { - 
t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) - } - v := &engine.Env{} - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil - }) - r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - if r.Code != http.StatusOK { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } -} - -func TestGetImagesByName(t *testing.T) { - eng := engine.New() - name := "image_name" - var called bool - eng.Register("image_inspect", func(job *engine.Job) error { - called = true - if job.Args[0] != name { - t.Fatalf("name != '%s': %#v", name, job.Args[0]) - } - if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { - t.Fatal("dirty env variable not set") - } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { - t.Fatal("dirty env variable set when it shouldn't") - } - v := &engine.Env{} - v.SetBool("dirty", true) - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil - }) - r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } - var stdoutJson interface{} - if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { - t.Fatalf("%#v", err) - } - if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { - t.Fatalf("%#v", stdoutJson) - } -} - -func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { - return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) -} - -func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { - r := httptest.NewRecorder() - req, err := 
http.NewRequest(method, target, body) - if err != nil { - t.Fatal(err) - } - ServeRequest(eng, version, r, req) - return r -} - -func readEnv(src io.Reader, t *testing.T) *engine.Env { - out := engine.NewOutput() - v, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, src); err != nil { - t.Fatal(err) - } - out.Close() - return v -} - -func toJson(data interface{}, t *testing.T) io.Reader { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(data); err != nil { - t.Fatal(err) - } - return &buf -} - -func assertContentType(recorder *httptest.ResponseRecorder, contentType string, t *testing.T) { - if recorder.HeaderMap.Get("Content-Type") != contentType { - t.Fatalf("%#v\n", recorder) - } -} - -// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that -// should die as soon as we converted all integration tests? -// assertHttpNotError expect the given response to not have an error. -// Otherwise the it causes the test to fail. 
-func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { - // Non-error http status are [200, 400) - if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { - t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) - } -} - -func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) types.Image { - return types.Image{ - RepoTags: data.RepoTags, - ID: data.Id, - Created: int(data.Created), - Size: int(data.Size), - VirtualSize: int(data.VirtualSize), - } -} - -type getImagesJSONStruct struct { - RepoTags []string - Id string - Created int64 - Size int64 - VirtualSize int64 -} - -var sampleImage getImagesJSONStruct = getImagesJSONStruct{ - RepoTags: []string{"test-name:test-tag"}, - Id: "ID", - Created: 999, - Size: 777, - VirtualSize: 666, -} diff --git a/api/server/server_windows.go b/api/server/server_windows.go index ad7b3c48ad511..c121bbd3e8471 100644 --- a/api/server/server_windows.go +++ b/api/server/server_windows.go @@ -1,33 +1,28 @@ // +build windows + package server import ( "errors" "net" + "net/http" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" + "github.com/docker/docker/daemon" ) // NewServer sets up the required Server and does protocol specific checking. 
-func NewServer(proto, addr string, job *engine.Job) (Server, error) { +func (s *Server) newServer(proto, addr string) (Server, error) { var ( err error l net.Listener - r = createRouter( - job.Eng, - job.GetenvBool("Logging"), - job.GetenvBool("EnableCors"), - job.Getenv("CorsHeaders"), - job.Getenv("Version"), - ) ) switch proto { case "tcp": - if !job.GetenvBool("TlsVerify") { - logrus.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") + if !s.cfg.TlsVerify { + logrus.Warn("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") } - if l, err = NewTcpSocket(addr, tlsConfigFromJob(job)); err != nil { + if l, err = NewTcpSocket(addr, tlsConfigFromServerConfig(s.cfg)); err != nil { return nil, err } if err := allocateDaemonPort(addr); err != nil { @@ -36,15 +31,21 @@ func NewServer(proto, addr string, job *engine.Job) (Server, error) { default: return nil, errors.New("Invalid protocol format. 
Windows only supports tcp.") } + return &HttpServer{ + &http.Server{ + Addr: addr, + Handler: s.router, + }, + l, + }, nil } -// Called through eng.Job("acceptconnections") -func AcceptConnections(job *engine.Job) error { +func (s *Server) AcceptConnections(d *daemon.Daemon) { + s.daemon = d // close the lock so the listeners start accepting connections select { - case <-activationLock: + case <-s.start: default: - close(activationLock) + close(s.start) } - return nil } diff --git a/api/server/tcp_socket.go b/api/server/tcp_socket.go index 415542c1430c7..a1f57231a5d05 100644 --- a/api/server/tcp_socket.go +++ b/api/server/tcp_socket.go @@ -8,7 +8,6 @@ import ( "net" "os" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/listenbuffer" ) @@ -19,21 +18,21 @@ type tlsConfig struct { Verify bool } -func tlsConfigFromJob(job *engine.Job) *tlsConfig { - verify := job.GetenvBool("TlsVerify") - if !job.GetenvBool("Tls") && !verify { +func tlsConfigFromServerConfig(conf *ServerConfig) *tlsConfig { + verify := conf.TlsVerify + if !conf.Tls && !conf.TlsVerify { return nil } return &tlsConfig{ Verify: verify, - Certificate: job.Getenv("TlsCert"), - Key: job.Getenv("TlsKey"), - CA: job.Getenv("TlsCa"), + Certificate: conf.TlsCert, + Key: conf.TlsKey, + CA: conf.TlsCa, } } -func NewTcpSocket(addr string, config *tlsConfig) (net.Listener, error) { - l, err := listenbuffer.NewListenBuffer("tcp", addr, activationLock) +func NewTcpSocket(addr string, config *tlsConfig, activate <-chan struct{}) (net.Listener, error) { + l, err := listenbuffer.NewListenBuffer("tcp", addr, activate) if err != nil { return nil, err } diff --git a/api/server/unix_socket.go b/api/server/unix_socket.go index e472efd0a4a09..157005da6f05e 100644 --- a/api/server/unix_socket.go +++ b/api/server/unix_socket.go @@ -12,13 +12,13 @@ import ( "github.com/docker/libcontainer/user" ) -func NewUnixSocket(path, group string) (net.Listener, error) { +func NewUnixSocket(path, group string, activate 
<-chan struct{}) (net.Listener, error) { if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { return nil, err } mask := syscall.Umask(0777) defer syscall.Umask(mask) - l, err := listenbuffer.NewListenBuffer("unix", path, activationLock) + l, err := listenbuffer.NewListenBuffer("unix", path, activate) if err != nil { return nil, err } diff --git a/api/types/types.go b/api/types/types.go index 77b211705dc2d..7c31065460893 100644 --- a/api/types/types.go +++ b/api/types/types.go @@ -1,5 +1,13 @@ package types +import ( + "time" + + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/pkg/version" + "github.com/docker/docker/runconfig" +) + // ContainerCreateResponse contains the information returned to a client on the // creation of a new container. type ContainerCreateResponse struct { @@ -14,9 +22,6 @@ type ContainerCreateResponse struct { type ContainerExecCreateResponse struct { // ID is the exec ID. ID string `json:"Id"` - - // Warnings are any warnings encountered during the execution of the command. 
- Warnings []string `json:"Warnings"` } // POST /auth @@ -49,6 +54,7 @@ type ImageHistory struct { CreatedBy string Tags []string Size int64 + Comment string } // DELETE "/images/{name:.*}" @@ -69,6 +75,23 @@ type Image struct { Labels map[string]string } +// GET "/images/{name:.*}/json" +type ImageInspect struct { + Id string + Parent string + Comment string + Created time.Time + Container string + ContainerConfig *runconfig.Config + DockerVersion string + Author string + Config *runconfig.Config + Architecture string + Os string + Size int64 + VirtualSize int64 +} + type LegacyImage struct { ID string `json:"Id"` Repository string @@ -103,3 +126,102 @@ type Container struct { type CopyConfig struct { Resource string } + +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + +type Version struct { + Version string + ApiVersion version.Version + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` +} + +// GET "/info" +type Info struct { + ID string + Containers int + Images int + Driver string + DriverStatus [][2]string + MemoryLimit bool + SwapLimit bool + CpuCfsQuota bool + IPv4Forwarding bool + Debug bool + NFd int + NGoroutines int + SystemTime string + ExecutionDriver string + LoggingDriver string + NEventsListener int + KernelVersion string + OperatingSystem string + IndexServerAddress string + RegistryConfig interface{} + InitSha1 string + InitPath string + NCPU int + MemTotal int64 + DockerRootDir string + HttpProxy string + HttpsProxy string + NoProxy string + Name string + Labels []string +} + +// This struct is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +type ContainerState struct { + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool 
+ Pid int + ExitCode int + Error string + StartedAt time.Time + FinishedAt time.Time +} + +// GET "/containers/{name:.*}/json" +type ContainerJSON struct { + Id string + Created time.Time + Path string + Args []string + Config *runconfig.Config + State *ContainerState + Image string + NetworkSettings *network.Settings + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Name string + RestartCount int + Driver string + ExecDriver string + MountLabel string + ProcessLabel string + Volumes map[string]string + VolumesRW map[string]bool + AppArmorProfile string + ExecIDs []string + HostConfig *runconfig.HostConfig +} diff --git a/builder/dispatchers.go b/builder/dispatchers.go index 820ac17e3bce1..e807f1aee1034 100644 --- a/builder/dispatchers.go +++ b/builder/dispatchers.go @@ -262,7 +262,7 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) b.Config.Cmd = config.Cmd runconfig.Merge(b.Config, config) - defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) logrus.Debugf("[BUILDER] Command to be executed: %v", b.Config.Cmd) @@ -301,13 +301,15 @@ func run(b *Builder, args []string, attributes map[string]bool, original string) // Argument handling is the same as RUN. // func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { - b.Config.Cmd = handleJsonArgs(args, attributes) + cmdSlice := handleJsonArgs(args, attributes) if !attributes["json"] { - b.Config.Cmd = append([]string{"/bin/sh", "-c"}, b.Config.Cmd...) + cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) } - if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", b.Config.Cmd)); err != nil { + b.Config.Cmd = runconfig.NewCommand(cmdSlice...) 
+ + if err := b.commit("", b.Config.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { return err } @@ -332,13 +334,13 @@ func entrypoint(b *Builder, args []string, attributes map[string]bool, original switch { case attributes["json"]: // ENTRYPOINT ["echo", "hi"] - b.Config.Entrypoint = parsed + b.Config.Entrypoint = runconfig.NewEntrypoint(parsed...) case len(parsed) == 0: // ENTRYPOINT [] b.Config.Entrypoint = nil default: // ENTRYPOINT echo hi - b.Config.Entrypoint = []string{"/bin/sh", "-c", parsed[0]} + b.Config.Entrypoint = runconfig.NewEntrypoint("/bin/sh", "-c", parsed[0]) } // when setting the entrypoint if a CMD was not explicitly set then diff --git a/builder/evaluator.go b/builder/evaluator.go index 6237f2663010a..9a2b57a8f93ea 100644 --- a/builder/evaluator.go +++ b/builder/evaluator.go @@ -30,14 +30,13 @@ import ( "github.com/docker/docker/api" "github.com/docker/docker/builder/command" "github.com/docker/docker/builder/parser" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) @@ -80,7 +79,6 @@ func init() { // processing as it evaluates the parsing result. type Builder struct { Daemon *daemon.Daemon - Engine *engine.Engine // effectively stdio for the run. Because it is not stdio, I said // "Effectively". Do not use stdio anywhere in this package for any reason. @@ -101,8 +99,8 @@ type Builder struct { // the final configs of the Dockerfile but dont want the layers disableCommit bool - AuthConfig *registry.AuthConfig - AuthConfigFile *registry.ConfigFile + AuthConfig *cliconfig.AuthConfig + ConfigFile *cliconfig.ConfigFile // Deprecated, original writer used for ImagePull. 
To be removed. OutOld io.Writer @@ -124,7 +122,9 @@ type Builder struct { // Set resource restrictions for build containers cpuSetCpus string + cpuSetMems string cpuShares int64 + cpuQuota int64 memory int64 memorySwap int64 diff --git a/builder/internals.go b/builder/internals.go index e0c7987ca13f8..adeadd87c85f1 100644 --- a/builder/internals.go +++ b/builder/internals.go @@ -22,20 +22,21 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/builder/parser" "github.com/docker/docker/daemon" + "github.com/docker/docker/graph" imagepkg "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/tarsum" "github.com/docker/docker/pkg/urlutil" + "github.com/docker/docker/registry" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) func (b *Builder) readContext(context io.Reader) error { @@ -61,7 +62,7 @@ func (b *Builder) readContext(context io.Reader) error { return nil } -func (b *Builder) commit(id string, autoCmd []string, comment string) error { +func (b *Builder) commit(id string, autoCmd *runconfig.Command, comment string) error { if b.disableCommit { return nil } @@ -71,8 +72,8 @@ func (b *Builder) commit(id string, autoCmd []string, comment string) error { b.Config.Image = b.image if id == "" { cmd := b.Config.Cmd - b.Config.Cmd = []string{"/bin/sh", "-c", "#(nop) " + comment} - defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", "#(nop) "+comment) + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { @@ 
-146,8 +147,15 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp // do the copy (e.g. hash value if cached). Don't actually do // the copy until we've looked at all src files for _, orig := range args[0 : len(args)-1] { - err := calcCopyInfo(b, cmdName, ©Infos, orig, dest, allowRemote, allowDecompression) - if err != nil { + if err := calcCopyInfo( + b, + cmdName, + ©Infos, + orig, + dest, + allowRemote, + allowDecompression, + ); err != nil { return err } } @@ -182,8 +190,8 @@ func (b *Builder) runContextCommand(args []string, allowRemote bool, allowDecomp } cmd := b.Config.Cmd - b.Config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)} - defer func(cmd []string) { b.Config.Cmd = cmd }(cmd) + b.Config.Cmd = runconfig.NewCommand("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)) + defer func(cmd *runconfig.Command) { b.Config.Cmd = cmd }(cmd) hit, err := b.probeCache() if err != nil { @@ -250,7 +258,7 @@ func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath stri *cInfos = append(*cInfos, &ci) // Initiate the download - resp, err := utils.Download(ci.origPath) + resp, err := httputils.Download(ci.origPath) if err != nil { return err } @@ -434,24 +442,29 @@ func (b *Builder) pullImage(name string) (*imagepkg.Image, error) { if tag == "" { tag = "latest" } - job := b.Engine.Job("pull", remote, tag) + pullRegistryAuth := b.AuthConfig - if len(b.AuthConfigFile.Configs) > 0 { + if len(b.ConfigFile.AuthConfigs) > 0 { // The request came with a full auth config file, we prefer to use that repoInfo, err := b.Daemon.RegistryService.ResolveRepository(remote) if err != nil { return nil, err } - resolvedAuth := b.AuthConfigFile.ResolveAuthConfig(repoInfo.Index) + resolvedAuth := registry.ResolveAuthConfig(b.ConfigFile, repoInfo.Index) pullRegistryAuth = &resolvedAuth } - job.SetenvBool("json", b.StreamFormatter.Json()) - job.SetenvBool("parallel", true) - 
job.SetenvJson("authConfig", pullRegistryAuth) - job.Stdout.Add(ioutils.NopWriteCloser(b.OutOld)) - if err := job.Run(); err != nil { + + imagePullConfig := &graph.ImagePullConfig{ + Parallel: true, + AuthConfig: pullRegistryAuth, + OutStream: ioutils.NopWriteCloser(b.OutOld), + Json: b.StreamFormatter.Json(), + } + + if err := b.Daemon.Repositories().Pull(remote, tag, imagePullConfig); err != nil { return nil, err } + image, err := b.Daemon.Repositories().LookupImage(name) if err != nil { return nil, err @@ -476,7 +489,7 @@ func (b *Builder) processImageFrom(img *imagepkg.Image) error { fmt.Fprintf(b.ErrStream, "# Executing %d build triggers\n", nTriggers) } - // Copy the ONBUILD triggers, and remove them from the config, since the config will be commited. + // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. onBuildTriggers := b.Config.OnBuild b.Config.OnBuild = []string{} @@ -540,7 +553,9 @@ func (b *Builder) create() (*daemon.Container, error) { hostConfig := &runconfig.HostConfig{ CpuShares: b.cpuShares, + CpuQuota: b.cpuQuota, CpusetCpus: b.cpuSetCpus, + CpusetMems: b.cpuSetMems, Memory: b.memory, MemorySwap: b.memorySwap, } @@ -559,12 +574,13 @@ func (b *Builder) create() (*daemon.Container, error) { b.TmpContainers[c.ID] = struct{}{} fmt.Fprintf(b.OutStream, " ---> Running in %s\n", stringid.TruncateID(c.ID)) - if len(config.Cmd) > 0 { + if config.Cmd.Len() > 0 { // override the entry point that may have been picked up from the base image - c.Path = config.Cmd[0] - c.Args = config.Cmd[1:] + s := config.Cmd.Slice() + c.Path = s[0] + c.Args = s[1:] } else { - config.Cmd = []string{} + config.Cmd = runconfig.NewCommand() } return c, nil @@ -601,11 +617,10 @@ func (b *Builder) run(c *daemon.Container) error { // Wait for it to finish if ret, _ := c.WaitStop(-1 * time.Second); ret != 0 { - err := &jsonmessage.JSONError{ + return &jsonmessage.JSONError{ Message: fmt.Sprintf("The command %v returned a non-zero code: 
%d", b.Config.Cmd, ret), Code: ret, } - return err } return nil @@ -637,14 +652,12 @@ func (b *Builder) addContext(container *daemon.Container, orig, dest string, dec err error destExists = true origPath = path.Join(b.contextPath, orig) - destPath = path.Join(container.RootfsPath(), dest) + destPath string ) - if destPath != container.RootfsPath() { - destPath, err = symlink.FollowSymlinkInScope(destPath, container.RootfsPath()) - if err != nil { - return err - } + destPath, err = container.GetResourcePath(dest) + if err != nil { + return err } // Preserve the trailing '/' diff --git a/builder/job.go b/builder/job.go index 89ed52f873d74..0ad488aae855c 100644 --- a/builder/job.go +++ b/builder/job.go @@ -2,20 +2,20 @@ package builder import ( "bytes" - "encoding/json" "fmt" "io" "io/ioutil" "os" - "os/exec" "strings" + "sync" "github.com/docker/docker/api" "github.com/docker/docker/builder/parser" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/daemon" - "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/urlutil" @@ -36,43 +36,62 @@ var validCommitCommands = map[string]bool{ "onbuild": true, } -type BuilderJob struct { - Engine *engine.Engine - Daemon *daemon.Daemon +type Config struct { + DockerfileName string + RemoteURL string + RepoName string + SuppressOutput bool + NoCache bool + Remove bool + ForceRemove bool + Pull bool + JSONFormat bool + Memory int64 + MemorySwap int64 + CpuShares int64 + CpuQuota int64 + CpuSetCpus string + CpuSetMems string + AuthConfig *cliconfig.AuthConfig + ConfigFile *cliconfig.ConfigFile + + Stdout io.Writer + Context io.ReadCloser + // When closed, the job has been cancelled. + // Note: not all jobs implement cancellation. 
+ // See Job.Cancel() and Job.WaitCancelled() + cancelled chan struct{} + cancelOnce sync.Once } -func (b *BuilderJob) Install() { - b.Engine.Register("build", b.CmdBuild) - b.Engine.Register("build_config", b.CmdBuildConfig) +// When called, causes the Job.WaitCancelled channel to unblock. +func (b *Config) Cancel() { + b.cancelOnce.Do(func() { + close(b.cancelled) + }) } -func (b *BuilderJob) CmdBuild(job *engine.Job) error { - if len(job.Args) != 0 { - return fmt.Errorf("Usage: %s\n", job.Name) +// Returns a channel which is closed ("never blocks") when the job is cancelled. +func (b *Config) WaitCancelled() <-chan struct{} { + return b.cancelled +} + +func NewBuildConfig() *Config { + return &Config{ + AuthConfig: &cliconfig.AuthConfig{}, + ConfigFile: &cliconfig.ConfigFile{}, + cancelled: make(chan struct{}), } +} + +func Build(d *daemon.Daemon, buildConfig *Config) error { var ( - dockerfileName = job.Getenv("dockerfile") - remoteURL = job.Getenv("remote") - repoName = job.Getenv("t") - suppressOutput = job.GetenvBool("q") - noCache = job.GetenvBool("nocache") - rm = job.GetenvBool("rm") - forceRm = job.GetenvBool("forcerm") - pull = job.GetenvBool("pull") - memory = job.GetenvInt64("memory") - memorySwap = job.GetenvInt64("memswap") - cpuShares = job.GetenvInt64("cpushares") - cpuSetCpus = job.Getenv("cpusetcpus") - authConfig = ®istry.AuthConfig{} - configFile = ®istry.ConfigFile{} - tag string - context io.ReadCloser + repoName string + tag string + context io.ReadCloser ) - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("configFile", configFile) - - repoName, tag = parsers.ParseRepositoryTag(repoName) + repoName, tag = parsers.ParseRepositoryTag(buildConfig.RepoName) if repoName != "" { if err := registry.ValidateRepositoryName(repoName); err != nil { return err @@ -84,29 +103,22 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) error { } } - if remoteURL == "" { - context = ioutil.NopCloser(job.Stdin) - } else if urlutil.IsGitURL(remoteURL) { 
- if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") + if buildConfig.RemoteURL == "" { + context = ioutil.NopCloser(buildConfig.Context) + } else if urlutil.IsGitURL(buildConfig.RemoteURL) { + root, err := utils.GitClone(buildConfig.RemoteURL) if err != nil { return err } defer os.RemoveAll(root) - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - c, err := archive.Tar(root, archive.Uncompressed) if err != nil { return err } context = c - } else if urlutil.IsURL(remoteURL) { - f, err := utils.Download(remoteURL) + } else if urlutil.IsURL(buildConfig.RemoteURL) { + f, err := httputils.Download(buildConfig.RemoteURL) if err != nil { return err } @@ -118,9 +130,9 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) error { // When we're downloading just a Dockerfile put it in // the default name - don't allow the client to move/specify it - dockerfileName = api.DefaultDockerfileName + buildConfig.DockerfileName = api.DefaultDockerfileName - c, err := archive.Generate(dockerfileName, string(dockerFile)) + c, err := archive.Generate(buildConfig.DockerfileName, string(dockerFile)) if err != nil { return err } @@ -128,34 +140,35 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) error { } defer context.Close() - sf := streamformatter.NewStreamFormatter(job.GetenvBool("json")) + sf := streamformatter.NewStreamFormatter(buildConfig.JSONFormat) builder := &Builder{ - Daemon: b.Daemon, - Engine: b.Engine, + Daemon: d, OutStream: &streamformatter.StdoutFormater{ - Writer: job.Stdout, + Writer: buildConfig.Stdout, StreamFormatter: sf, }, ErrStream: &streamformatter.StderrFormater{ - Writer: job.Stdout, + Writer: buildConfig.Stdout, StreamFormatter: sf, }, - Verbose: !suppressOutput, - UtilizeCache: !noCache, - Remove: rm, - ForceRemove: forceRm, - Pull: pull, - 
OutOld: job.Stdout, + Verbose: !buildConfig.SuppressOutput, + UtilizeCache: !buildConfig.NoCache, + Remove: buildConfig.Remove, + ForceRemove: buildConfig.ForceRemove, + Pull: buildConfig.Pull, + OutOld: buildConfig.Stdout, StreamFormatter: sf, - AuthConfig: authConfig, - AuthConfigFile: configFile, - dockerfileName: dockerfileName, - cpuShares: cpuShares, - cpuSetCpus: cpuSetCpus, - memory: memory, - memorySwap: memorySwap, - cancelled: job.WaitCancelled(), + AuthConfig: buildConfig.AuthConfig, + ConfigFile: buildConfig.ConfigFile, + dockerfileName: buildConfig.DockerfileName, + cpuShares: buildConfig.CpuShares, + cpuQuota: buildConfig.CpuQuota, + cpuSetCpus: buildConfig.CpuSetCpus, + cpuSetMems: buildConfig.CpuSetMems, + memory: buildConfig.Memory, + memorySwap: buildConfig.MemorySwap, + cancelled: buildConfig.WaitCancelled(), } id, err := builder.Run(context) @@ -164,41 +177,27 @@ func (b *BuilderJob) CmdBuild(job *engine.Job) error { } if repoName != "" { - b.Daemon.Repositories().Set(repoName, tag, id, true) + return d.Repositories().Tag(repoName, tag, id, true) } return nil } -func (b *BuilderJob) CmdBuildConfig(job *engine.Job) error { - if len(job.Args) != 0 { - return fmt.Errorf("Usage: %s\n", job.Name) - } - - var ( - changes = job.GetenvList("changes") - newConfig runconfig.Config - ) - - if err := job.GetenvJson("config", &newConfig); err != nil { - return err - } - +func BuildFromConfig(d *daemon.Daemon, c *runconfig.Config, changes []string) (*runconfig.Config, error) { ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) if err != nil { - return err + return nil, err } // ensure that the commands are valid for _, n := range ast.Children { if !validCommitCommands[n.Value] { - return fmt.Errorf("%s is not a valid change command", n.Value) + return nil, fmt.Errorf("%s is not a valid change command", n.Value) } } builder := &Builder{ - Daemon: b.Daemon, - Engine: b.Engine, - Config: &newConfig, + Daemon: d, + Config: c, OutStream: 
ioutil.Discard, ErrStream: ioutil.Discard, disableCommit: true, @@ -206,12 +205,32 @@ func (b *BuilderJob) CmdBuildConfig(job *engine.Job) error { for i, n := range ast.Children { if err := builder.dispatch(i, n); err != nil { - return err + return nil, err } } - if err := json.NewEncoder(job.Stdout).Encode(builder.Config); err != nil { - return err + return builder.Config, nil +} + +func Commit(d *daemon.Daemon, name string, c *daemon.ContainerCommitConfig) (string, error) { + container, err := d.Get(name) + if err != nil { + return "", err } - return nil + + newConfig, err := BuildFromConfig(d, c.Config, c.Changes) + if err != nil { + return "", err + } + + if err := runconfig.Merge(newConfig, container.Config); err != nil { + return "", err + } + + img, err := d.Commit(container, c.Repo, c.Tag, c.Comment, c.Author, c.Pause, newConfig) + if err != nil { + return "", err + } + + return img.ID, nil } diff --git a/builder/parser/line_parsers.go b/builder/parser/line_parsers.go index 6e284d6fc3b85..8db360ca3753a 100644 --- a/builder/parser/line_parsers.go +++ b/builder/parser/line_parsers.go @@ -233,7 +233,7 @@ func parseString(rest string) (*Node, map[string]bool, error) { // parseJSON converts JSON arrays to an AST. func parseJSON(rest string) (*Node, map[string]bool, error) { var myJson []interface{} - if err := json.Unmarshal([]byte(rest), &myJson); err != nil { + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJson); err != nil { return nil, nil, err } @@ -279,7 +279,7 @@ func parseMaybeJSON(rest string) (*Node, map[string]bool, error) { } // parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attmpts to parse it as a whitespace +// so, passes to parseJSON; if not, attempts to parse it as a whitespace // delimited string. 
func parseMaybeJSONToList(rest string) (*Node, map[string]bool, error) { node, attrs, err := parseJSON(rest) diff --git a/builtins/builtins.go b/builtins/builtins.go deleted file mode 100644 index 8957b58332414..0000000000000 --- a/builtins/builtins.go +++ /dev/null @@ -1,48 +0,0 @@ -package builtins - -import ( - "runtime" - - "github.com/docker/docker/api" - apiserver "github.com/docker/docker/api/server" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/parsers/kernel" -) - -func Register(eng *engine.Engine) error { - if err := remote(eng); err != nil { - return err - } - if err := eng.Register("version", dockerVersion); err != nil { - return err - } - - return nil -} - -// remote: a RESTful api for cross-docker communication -func remote(eng *engine.Engine) error { - if err := eng.Register("serveapi", apiserver.ServeApi); err != nil { - return err - } - return eng.Register("acceptconnections", apiserver.AcceptConnections) -} - -// builtins jobs independent of any subsystem -func dockerVersion(job *engine.Job) error { - v := &engine.Env{} - v.SetJson("Version", dockerversion.VERSION) - v.SetJson("ApiVersion", api.APIVERSION) - v.SetJson("GitCommit", dockerversion.GITCOMMIT) - v.Set("GoVersion", runtime.Version()) - v.Set("Os", runtime.GOOS) - v.Set("Arch", runtime.GOARCH) - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - v.Set("KernelVersion", kernelVersion.String()) - } - if _, err := v.WriteTo(job.Stdout); err != nil { - return err - } - return nil -} diff --git a/cliconfig/config.go b/cliconfig/config.go new file mode 100644 index 0000000000000..2a27589d20dd9 --- /dev/null +++ b/cliconfig/config.go @@ -0,0 +1,207 @@ +package cliconfig + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/homedir" +) + +const ( + // Where we store the config file + CONFIGFILE = "config.json" 
+ OLD_CONFIGFILE = ".dockercfg" + + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. + DEFAULT_INDEXSERVER = "https://index.docker.io/v1/" +) + +var ( + ErrConfigFileMissing = errors.New("The Auth config file is missing") +) + +// Registry Auth Info +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth"` + Email string `json:"email"` + ServerAddress string `json:"serveraddress,omitempty"` +} + +// ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]AuthConfig `json:"auths"` + HttpHeaders map[string]string `json:"HttpHeaders,omitempty"` + filename string // Note: not serialized - for internal use only +} + +func NewConfigFile(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + HttpHeaders: make(map[string]string), + filename: fn, + } +} + +// load up the auth config information and return values +// FIXME: use the internal golang config parser +func Load(configDir string) (*ConfigFile, error) { + if configDir == "" { + configDir = filepath.Join(homedir.Get(), ".docker") + } + + configFile := ConfigFile{ + AuthConfigs: make(map[string]AuthConfig), + filename: filepath.Join(configDir, CONFIGFILE), + } + + // Try happy path first - latest config file + if _, err := os.Stat(configFile.filename); err == nil { + file, err := os.Open(configFile.filename) + if err != nil { + return &configFile, err + } + defer file.Close() + + if err := json.NewDecoder(file).Decode(&configFile); err != nil { + return &configFile, err + } + + for addr, ac := range configFile.AuthConfigs { + ac.Username, ac.Password, err = DecodeAuth(ac.Auth) + if err != nil { + return &configFile, err + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + + return &configFile, nil + } else if 
!os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return &configFile, err + } + + // Can't find latest config file so check for the old one + confFile := filepath.Join(homedir.Get(), OLD_CONFIGFILE) + + if _, err := os.Stat(confFile); err != nil { + return &configFile, nil //missing file is not an error + } + + b, err := ioutil.ReadFile(confFile) + if err != nil { + return &configFile, err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return &configFile, fmt.Errorf("The Auth config file is empty") + } + authConfig := AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = DecodeAuth(origAuth[1]) + if err != nil { + return &configFile, err + } + origEmail := strings.Split(arr[1], " = ") + if len(origEmail) != 2 { + return &configFile, fmt.Errorf("Invalid Auth config file") + } + authConfig.Email = origEmail[1] + authConfig.ServerAddress = DEFAULT_INDEXSERVER + configFile.AuthConfigs[DEFAULT_INDEXSERVER] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = DecodeAuth(authConfig.Auth) + if err != nil { + return &configFile, err + } + authConfig.Auth = "" + authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return &configFile, nil +} + +func (configFile *ConfigFile) Save() error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + + authCopy.Auth = EncodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := 
configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { + return err + } + + if err := ioutil.WriteFile(configFile.filename, data, 0600); err != nil { + return err + } + + return nil +} + +func (config *ConfigFile) Filename() string { + return config.filename +} + +// create a base64 encoded auth string to store in config +func EncodeAuth(authConfig *AuthConfig) string { + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decode the auth string +func DecodeAuth(authStr string) (string, string, error) { + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", fmt.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} diff --git a/cliconfig/config_file_test.go b/cliconfig/config_file_test.go new file mode 100644 index 0000000000000..6d1125f7bf816 --- /dev/null +++ b/cliconfig/config_file_test.go @@ -0,0 +1,157 @@ +package cliconfig + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/pkg/homedir" +) + +func TestMissingFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", 
err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestSaveFileToDirs(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + + tmpHome += "/.docker" + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on missing file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestEmptyFile(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, CONFIGFILE) + ioutil.WriteFile(fn, []byte(""), 0600) + + _, err := Load(tmpHome) + if err == nil { + t.Fatalf("Was supposed to fail") + } +} + +func TestEmptyJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, CONFIGFILE) + ioutil.WriteFile(fn, []byte("{}"), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE)) + if !strings.Contains(string(buf), `"auths":`) { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestOldJson(t *testing.T) { + if runtime.GOOS == "windows" { + return + } + + tmpHome, _ := ioutil.TempDir("", "config-test") + defer os.RemoveAll(tmpHome) + + homeKey := homedir.Key() + homeVal := homedir.Get() + + defer func() { 
os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpHome) + + fn := filepath.Join(tmpHome, OLD_CONFIGFILE) + js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} + +func TestNewJson(t *testing.T) { + tmpHome, _ := ioutil.TempDir("", "config-test") + fn := filepath.Join(tmpHome, CONFIGFILE) + js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` + ioutil.WriteFile(fn, []byte(js), 0600) + + config, err := Load(tmpHome) + if err != nil { + t.Fatalf("Failed loading on empty json file: %q", err) + } + + ac := config.AuthConfigs["https://index.docker.io/v1/"] + if ac.Email != "user@example.com" || ac.Username != "joejoe" || ac.Password != "hello" { + t.Fatalf("Missing data from parsing:\n%q", config) + } + + // Now save it and make sure it shows up in new form + err = config.Save() + if err != nil { + t.Fatalf("Failed to save: %q", err) + } + + buf, err := ioutil.ReadFile(filepath.Join(tmpHome, CONFIGFILE)) + if !strings.Contains(string(buf), `"auths":`) || + !strings.Contains(string(buf), "user@example.com") { + t.Fatalf("Should have save in new form: %s", string(buf)) + } +} diff --git a/contrib/builder/deb/README.md 
b/contrib/builder/deb/README.md new file mode 100644 index 0000000000000..a6fd70dca72c3 --- /dev/null +++ b/contrib/builder/deb/README.md @@ -0,0 +1,5 @@ +# `dockercore/builder-deb` + +This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. + +To add new tags, see [`contrib/builder/deb` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/deb/build.sh b/contrib/builder/deb/build.sh new file mode 100755 index 0000000000000..8271d9dc4740d --- /dev/null +++ b/contrib/builder/deb/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +set -x +./generate.sh +for d in */; do + docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" +done diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile new file mode 100644 index 0000000000000..ad90a21183e14 --- /dev/null +++ b/contrib/builder/deb/debian-jessie/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
+# + +FROM debian:jessie + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile new file mode 100644 index 0000000000000..87274d4096878 --- /dev/null +++ b/contrib/builder/deb/debian-wheezy/Dockerfile @@ -0,0 +1,15 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM debian:wheezy +RUN echo deb http://http.debian.net/debian wheezy-backports main > /etc/apt/sources.list.d/wheezy-backports.list + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/generate.sh b/contrib/builder/deb/generate.sh new file mode 100755 index 0000000000000..cd187c7ce8ea1 --- /dev/null +++ b/contrib/builder/deb/generate.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -e + +# usage: ./generate.sh [versions] +# ie: ./generate.sh +# to update all Dockerfiles in this directory +# or: ./generate.sh debian-jessie +# to only update debian-jessie/Dockerfile +# or: ./generate.sh debian-newversion +# to create a new folder and a Dockerfile within it + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" + +versions=( "$@" ) +if [ 
${#versions[@]} -eq 0 ]; then + versions=( */ ) +fi +versions=( "${versions[@]%/}" ) + +for version in "${versions[@]}"; do + distro="${version%-*}" + suite="${version##*-}" + from="${distro}:${suite}" + + mkdir -p "$version" + echo "$version -> FROM $from" + cat > "$version/Dockerfile" <<-EOF + # + # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! + # + + FROM $from + EOF + + case "$from" in + debian:wheezy) + # add -backports, like our users have to + echo "RUN echo deb http://http.debian.net/debian $suite-backports main > /etc/apt/sources.list.d/$suite-backports.list" >> "$version/Dockerfile" + ;; + esac + + echo >> "$version/Dockerfile" + + # this list is sorted alphabetically; please keep it that way + packages=( + bash-completion # for bash-completion debhelper integration + btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) + build-essential # "essential for building Debian packages" + curl ca-certificates # for downloading Go + debhelper # for easy ".deb" building + dh-systemd # for systemd debhelper integration + git # for "git commit" info in "docker -v" + libapparmor-dev # for "sys/apparmor.h" + libdevmapper-dev # for "libdevmapper.h" + libsqlite3-dev # for "sqlite3.h" + ) + echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" + echo 'RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local' >> "$version/Dockerfile" + echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" + + echo >> "$version/Dockerfile" + + echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" + awk '$1 == "ENV" && $2 == "DOCKER_BUILDTAGS" { print; exit }' ../../../Dockerfile >> "$version/Dockerfile" +done diff --git 
a/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile new file mode 100644 index 0000000000000..5715b2698b80b --- /dev/null +++ b/contrib/builder/deb/ubuntu-debootstrap-trusty/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM ubuntu-debootstrap:trusty + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile new file mode 100644 index 0000000000000..3862b83707b52 --- /dev/null +++ b/contrib/builder/deb/ubuntu-debootstrap-utopic/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! 
+# + +FROM ubuntu-debootstrap:utopic + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile new file mode 100644 index 0000000000000..15911b268d39a --- /dev/null +++ b/contrib/builder/deb/ubuntu-debootstrap-vivid/Dockerfile @@ -0,0 +1,14 @@ +# +# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/generate.sh"! +# + +FROM ubuntu-debootstrap:vivid + +RUN apt-get update && apt-get install -y bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-systemd git libapparmor-dev libdevmapper-dev libsqlite3-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* + +ENV GO_VERSION 1.4.2 +RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvzC /usr/local +ENV PATH $PATH:/usr/local/go/bin + +ENV AUTO_GOPATH 1 +ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/check-config.sh b/contrib/check-config.sh index 59649d6c66bc3..8c55de590ee6a 100755 --- a/contrib/check-config.sh +++ b/contrib/check-config.sh @@ -27,7 +27,7 @@ is_set() { zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null } -# see http://en.wikipedia.org/wiki/ANSI_escape_code#Colors +# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors declare -A colors=( [black]=30 [red]=31 @@ -161,6 +161,7 @@ echo 'Optional Features:' flags=( RESOURCE_COUNTERS CGROUP_PERF + CFS_BANDWIDTH ) check_flags "${flags[@]}" diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker index 
ad48f2886cdb7..5b7a102a68396 100755 --- a/contrib/completion/bash/docker +++ b/contrib/completion/bash/docker @@ -279,7 +279,7 @@ _docker_build() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--cpu-shares -c --cpuset-cpus --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--cpu-shares -c --cpuset-cpus --cpu-quota --file -f --force-rm --help --memory -m --memory-swap --no-cache --pull --quiet -q --rm --tag -t" -- "$cur" ) ) ;; *) local counter="$(__docker_pos_first_nonflag '--tag|-t')" @@ -407,7 +407,7 @@ _docker_events() { _docker_exec() { case "$cur" in -*) - COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i -t --tty" -- "$cur" ) ) + COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) ;; *) __docker_containers_running @@ -770,6 +770,7 @@ _docker_run() { --cidfile --cpuset --cpu-shares -c + --cpu-quota --device --dns --dns-search @@ -1150,6 +1151,7 @@ _docker() { --dns --dns-search --exec-driver -e + --exec-opt --fixed-cidr --fixed-cidr-v6 --graph -g diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish index d3237588effe8..c5359118538fc 100644 --- a/contrib/completion/fish/docker.fish +++ b/contrib/completion/fish/docker.fish @@ -51,6 +51,7 @@ complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers' complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains' complete -c docker -f -n '__fish_docker_no_subcommand' -s e -l exec-driver -d 'Force the Docker runtime to use a specific exec driver' +complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set exec driver options' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed 
IPs (e.g. 10.20.0.0/16)' complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go index 9ad094a341a77..0a0b0803d39ac 100644 --- a/contrib/docker-device-tool/device_tool.go +++ b/contrib/docker-device-tool/device_tool.go @@ -125,7 +125,7 @@ func main() { err = devices.ResizePool(size) if err != nil { - fmt.Println("Error resizeing pool: ", err) + fmt.Println("Error resizing pool: ", err) os.Exit(1) } diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd index a9d21b17089a3..f251e9af5a51a 100755 --- a/contrib/init/openrc/docker.initd +++ b/contrib/init/openrc/docker.initd @@ -7,6 +7,7 @@ DOCKER_LOGFILE=${DOCKER_LOGFILE:-/var/log/${SVCNAME}.log} DOCKER_PIDFILE=${DOCKER_PIDFILE:-/run/${SVCNAME}.pid} DOCKER_BINARY=${DOCKER_BINARY:-/usr/bin/docker} DOCKER_OPTS=${DOCKER_OPTS:-} +UNSHARE_BINARY=${UNSHARE_BINARY:-/usr/bin/unshare} start() { checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" @@ -16,11 +17,12 @@ start() { ebegin "Starting docker daemon" start-stop-daemon --start --background \ - --exec "$DOCKER_BINARY" \ + --exec "$UNSHARE_BINARY" \ --pidfile "$DOCKER_PIDFILE" \ --stdout "$DOCKER_LOGFILE" \ --stderr "$DOCKER_LOGFILE" \ - -- -d -p "$DOCKER_PIDFILE" \ + -- --mount \ + -- "$DOCKER_BINARY" -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS eend $? 
} diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker index cf33c837791a0..35fd71f13e3a9 100755 --- a/contrib/init/sysvinit-debian/docker +++ b/contrib/init/sysvinit-debian/docker @@ -30,6 +30,7 @@ DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid DOCKER_LOGFILE=/var/log/$BASE.log DOCKER_OPTS= DOCKER_DESC="Docker" +UNSHARE=${UNSHARE:-/usr/bin/unshare} # Get lsb functions . /lib/lsb/init-functions @@ -99,11 +100,11 @@ case "$1" in log_begin_msg "Starting $DOCKER_DESC: $BASE" start-stop-daemon --start --background \ --no-close \ - --exec "$DOCKER" \ + --exec "$UNSHARE" \ --pidfile "$DOCKER_SSD_PIDFILE" \ --make-pidfile \ - -- \ - -d -p "$DOCKER_PIDFILE" \ + -- --mount \ + -- "$DOCKER" -d -p "$DOCKER_PIDFILE" \ $DOCKER_OPTS \ >> "$DOCKER_LOGFILE" 2>&1 log_end_msg $? diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf index f9930bd3962ea..5e8df6e3c207f 100644 --- a/contrib/init/upstart/docker.conf +++ b/contrib/init/upstart/docker.conf @@ -37,7 +37,7 @@ script if [ -f /etc/default/$UPSTART_JOB ]; then . /etc/default/$UPSTART_JOB fi - exec "$DOCKER" -d $DOCKER_OPTS + exec unshare -m -- "$DOCKER" -d $DOCKER_OPTS end script # Don't emit "started" event until docker.sock is ready. @@ -49,7 +49,7 @@ post-start script fi if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then while ! [ -e /var/run/docker.sock ]; do - initctl status $UPSTART_JOB | grep -q "stop/" && exit 1 + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 echo "Waiting for /var/run/docker.sock" sleep 0.1 done diff --git a/contrib/project-stats.sh b/contrib/project-stats.sh index 985a77f22dcbf..2691c72ffbd6a 100755 --- a/contrib/project-stats.sh +++ b/contrib/project-stats.sh @@ -3,7 +3,7 @@ ## Run this script from the root of the docker repository ## to query project stats useful to the maintainers. 
## You will need to install `pulls` and `issues` from -## http://github.com/crosbymichael/pulls +## https://github.com/crosbymichael/pulls set -e diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt index 37cc7be9154f8..e69e2b7b30fca 100644 --- a/contrib/syntax/vim/doc/dockerfile.txt +++ b/contrib/syntax/vim/doc/dockerfile.txt @@ -1,6 +1,6 @@ *dockerfile.txt* Syntax highlighting for Dockerfiles -Author: Honza Pokorny +Author: Honza Pokorny License: BSD INSTALLATION *installation* diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim index 36691e2504162..bd092686642e9 100644 --- a/contrib/syntax/vim/syntax/dockerfile.vim +++ b/contrib/syntax/vim/syntax/dockerfile.vim @@ -1,5 +1,5 @@ " dockerfile.vim - Syntax highlighting for Dockerfiles -" Maintainer: Honza Pokorny +" Maintainer: Honza Pokorny " Version: 0.5 diff --git a/daemon/attach.go b/daemon/attach.go index f95de41d52ee8..b2b8d0906789b 100644 --- a/daemon/attach.go +++ b/daemon/attach.go @@ -10,7 +10,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/jsonlog" "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/utils" ) func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error { @@ -131,7 +130,7 @@ func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io var err error if tty { - _, err = utils.CopyEscapable(cStdin, stdin) + _, err = copyEscapable(cStdin, stdin) } else { _, err = io.Copy(cStdin, stdin) @@ -185,3 +184,46 @@ func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io return nil }) } + +// Code c/c from io.Copy() modified to handle escape sequence +func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { + buf := make([]byte, 32*1024) + for { + nr, er := src.Read(buf) + if nr > 0 { + // ---- Docker addition + // char 16 is C-p + if nr == 1 && buf[0] == 16 { + nr, er 
= src.Read(buf) + // char 17 is C-q + if nr == 1 && buf[0] == 17 { + if err := src.Close(); err != nil { + return 0, err + } + return 0, nil + } + } + // ---- End of docker + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/daemon/checkpoint.go b/daemon/checkpoint.go new file mode 100644 index 0000000000000..538e921539a42 --- /dev/null +++ b/daemon/checkpoint.go @@ -0,0 +1,50 @@ +package daemon + +import ( + "fmt" + + "github.com/docker/libcontainer" +) + +// Checkpoint a running container. +func (daemon *Daemon) ContainerCheckpoint(name string, opts *libcontainer.CriuOpts) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + if !container.IsRunning() { + return fmt.Errorf("Container %s not running", name) + } + if err := container.Checkpoint(opts); err != nil { + return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) + } + + container.LogEvent("checkpoint") + return nil +} + +// Restore a checkpointed container. 
+func (daemon *Daemon) ContainerRestore(name string, opts *libcontainer.CriuOpts) error { + container, err := daemon.Get(name) + if err != nil { + return err + } + + // TODO: It's possible we only want to bypass the checkpointed check, + // I'm not sure how this will work if the container is already running + if container.IsRunning() { + return fmt.Errorf("Container %s already running", name) + } + + if !container.HasBeenCheckpointed() { + return fmt.Errorf("Container %s is not checkpointed", name) + } + + if err = container.Restore(opts); err != nil { + container.LogEvent("die") + return fmt.Errorf("Cannot restore container %s: %s", name, err) + } + + container.LogEvent("restore") + return nil +} diff --git a/daemon/commit.go b/daemon/commit.go index 1daf57a4fe6c5..0c49eb2c95193 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -1,55 +1,18 @@ package daemon import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/runconfig" ) -func (daemon *Daemon) ContainerCommit(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Not enough arguments. 
Usage: %s CONTAINER\n", job.Name) - } - name := job.Args[0] - - container, err := daemon.Get(name) - if err != nil { - return err - } - - var ( - config = container.Config - stdoutBuffer = bytes.NewBuffer(nil) - newConfig runconfig.Config - ) - - buildConfigJob := daemon.eng.Job("build_config") - buildConfigJob.Stdout.Add(stdoutBuffer) - buildConfigJob.Setenv("changes", job.Getenv("changes")) - // FIXME this should be remove when we remove deprecated config param - buildConfigJob.Setenv("config", job.Getenv("config")) - - if err := buildConfigJob.Run(); err != nil { - return err - } - if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil { - return err - } - - if err := runconfig.Merge(&newConfig, config); err != nil { - return err - } - - img, err := daemon.Commit(container, job.Getenv("repo"), job.Getenv("tag"), job.Getenv("comment"), job.Getenv("author"), job.GetenvBool("pause"), &newConfig) - if err != nil { - return err - } - job.Printf("%s\n", img.ID) - return nil +type ContainerCommitConfig struct { + Pause bool + Repo string + Tag string + Author string + Comment string + Changes []string + Config *runconfig.Config } // Commit creates a new filesystem image from the current state of a container. 
@@ -90,7 +53,7 @@ func (daemon *Daemon) Commit(container *Container, repository, tag, comment, aut // Register the image if needed if repository != "" { - if err := daemon.repositories.Set(repository, tag, img.ID, true); err != nil { + if err := daemon.repositories.Tag(repository, tag, img.ID, true); err != nil { return img, err } } diff --git a/daemon/config.go b/daemon/config.go index b46895c58f5a2..43b08531b52e1 100644 --- a/daemon/config.go +++ b/daemon/config.go @@ -29,6 +29,7 @@ type Config struct { GraphDriver string GraphOptions []string ExecDriver string + ExecOptions []string Mtu int SocketGroup string EnableCors bool @@ -58,6 +59,8 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.Bridge.Iface, []string{"b", "-bridge"}, "", "Attach containers to a network bridge") flag.StringVar(&config.Bridge.FixedCIDR, []string{"-fixed-cidr"}, "", "IPv4 subnet for fixed IPs") flag.StringVar(&config.Bridge.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", "IPv6 subnet for fixed IPs") + flag.StringVar(&config.Bridge.DefaultGatewayIPv4, []string{"-default-gateway"}, "", "Container default gateway IPv4 address") + flag.StringVar(&config.Bridge.DefaultGatewayIPv6, []string{"-default-gateway-v6"}, "", "Container default gateway IPv6 address") flag.BoolVar(&config.Bridge.InterContainerCommunication, []string{"#icc", "-icc"}, true, "Enable inter-container communication") flag.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", "Storage driver to use") flag.StringVar(&config.ExecDriver, []string{"e", "-exec-driver"}, "native", "Exec driver to use") @@ -68,13 +71,14 @@ func (config *Config) InstallFlags() { flag.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", "Set CORS headers in the remote API") opts.IPVar(&config.Bridge.DefaultIp, []string{"#ip", "-ip"}, "0.0.0.0", "Default IP when binding container ports") opts.ListVar(&config.GraphOptions, []string{"-storage-opt"}, "Set storage driver options") + opts.ListVar(&config.ExecOptions, 
[]string{"-exec-opt"}, "Set exec driver options") // FIXME: why the inconsistency between "hosts" and "sockets"? opts.IPListVar(&config.Dns, []string{"#dns", "-dns"}, "DNS server to use") opts.DnsSearchListVar(&config.DnsSearch, []string{"-dns-search"}, "DNS search domains to use") opts.LabelListVar(&config.Labels, []string{"-label"}, "Set key=value labels to the daemon") config.Ulimits = make(map[string]*ulimit.Ulimit) opts.UlimitMapVar(config.Ulimits, []string{"-default-ulimit"}, "Set default ulimits for containers") - flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Containers logging driver") + flag.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", "Default driver for container logs") } func getDefaultNetworkMtu() int { diff --git a/daemon/container.go b/daemon/container.go index 46defe9683298..308471a0ff9ec 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -22,6 +22,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/journald" "github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/logger/syslog" "github.com/docker/docker/daemon/network" @@ -149,8 +150,7 @@ func (container *Container) toDisk() error { return err } - err = ioutil.WriteFile(pth, data, 0666) - if err != nil { + if err := ioutil.WriteFile(pth, data, 0666); err != nil { return err } @@ -179,11 +179,13 @@ func (container *Container) readHostConfig() error { return nil } - data, err := ioutil.ReadFile(pth) + f, err := os.Open(pth) if err != nil { return err } - return json.Unmarshal(data, container.hostConfig) + defer f.Close() + + return json.NewDecoder(f).Decode(&container.hostConfig) } func (container *Container) WriteHostConfig() error { @@ -209,12 +211,37 @@ func (container *Container) LogEvent(action string) { ) } -func (container *Container) getResourcePath(path string) (string, error) 
{ +// Evaluates `path` in the scope of the container's basefs, with proper path +// sanitisation. Symlinks are all scoped to the basefs of the container, as +// though the container's basefs was `/`. +// +// The basefs of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's basefs +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs) } -func (container *Container) getRootResourcePath(path string) (string, error) { +// Evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. 
+func (container *Container) GetRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root) } @@ -356,6 +383,8 @@ func populateCommand(c *Container, env []string) error { MemorySwap: c.hostConfig.MemorySwap, CpuShares: c.hostConfig.CpuShares, CpusetCpus: c.hostConfig.CpusetCpus, + CpusetMems: c.hostConfig.CpusetMems, + CpuQuota: c.hostConfig.CpuQuota, Rlimits: rlimits, } @@ -511,7 +540,7 @@ func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser { } func (container *Container) buildHostnameFile() error { - hostnamePath, err := container.getRootResourcePath("hostname") + hostnamePath, err := container.GetRootResourcePath("hostname") if err != nil { return err } @@ -525,7 +554,7 @@ func (container *Container) buildHostnameFile() error { func (container *Container) buildHostsFiles(IP string) error { - hostsPath, err := container.getRootResourcePath("hosts") + hostsPath, err := container.GetRootResourcePath("hosts") if err != nil { return err } @@ -575,9 +604,16 @@ func (container *Container) AllocateNetwork() error { var ( err error eng = container.daemon.eng + networkSettings *network.Settings ) - networkSettings, err := bridge.Allocate(container.ID, container.Config.MacAddress, "", "") + if container.IsCheckpointed() { + // FIXME: ipv6 support... + networkSettings, err = bridge.Allocate(container.ID, container.Config.MacAddress, container.NetworkSettings.IPAddress, "", true) + } else { + networkSettings, err = bridge.Allocate(container.ID, container.Config.MacAddress, "", "", false) + } + if err != nil { return err } @@ -661,7 +697,7 @@ func (container *Container) RestoreNetwork() error { eng := container.daemon.eng // Re-allocate the interface with the same IP and MAC address. 
- if _, err := bridge.Allocate(container.ID, container.NetworkSettings.MacAddress, container.NetworkSettings.IPAddress, ""); err != nil { + if _, err := bridge.Allocate(container.ID, container.NetworkSettings.MacAddress, container.NetworkSettings.IPAddress, "", true); err != nil { return err } @@ -677,7 +713,11 @@ func (container *Container) RestoreNetwork() error { // cleanup releases any network resources allocated to the container along with any rules // around how containers are linked together. It also unmounts the container's root filesystem. func (container *Container) cleanup() { - container.ReleaseNetwork() + if container.IsCheckpointed() { + logrus.Debugf("not calling ReleaseNetwork() for checkpointed container %s", container.ID) + } else { + container.ReleaseNetwork() + } // Disable all active links if container.activeLinks != nil { @@ -891,7 +931,7 @@ func (container *Container) Unmount() error { } func (container *Container) logPath(name string) (string, error) { - return container.getRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) + return container.GetRootResourcePath(fmt.Sprintf("%s-%s.log", container.ID, name)) } func (container *Container) ReadLog(name string) (io.Reader, error) { @@ -903,11 +943,11 @@ func (container *Container) ReadLog(name string) (io.Reader, error) { } func (container *Container) hostConfigPath() (string, error) { - return container.getRootResourcePath("hostconfig.json") + return container.GetRootResourcePath("hostconfig.json") } func (container *Container) jsonPath() (string, error) { - return container.getRootResourcePath("config.json") + return container.GetRootResourcePath("config.json") } // This method must be exported to be used from the lxc template @@ -954,38 +994,76 @@ func (container *Container) GetSize() (int64, int64) { return sizeRw, sizeRootfs } -func (container *Container) Copy(resource string) (io.ReadCloser, error) { - if err := container.Mount(); err != nil { - return nil, err +func (container 
*Container) Checkpoint(opts *libcontainer.CriuOpts) error { + return container.daemon.Checkpoint(container, opts) +} + +// XXX Start() does a lot more. Not sure if we have +// to do everything it does. +func (container *Container) Restore(opts *libcontainer.CriuOpts) error { + var err error + + container.Lock() + defer container.Unlock() + + defer func() { + if err != nil { + container.cleanup() + } + }() + + if err = container.initializeNetworking(); err != nil { + return err } - basePath, err := container.getResourcePath(resource) + linkedEnv, err := container.setupLinkedContainers() if err != nil { - container.Unmount() - return nil, err + return err } - // Check if this is actually in a volume - for _, mnt := range container.VolumeMounts() { - if len(mnt.MountToPath) > 0 && strings.HasPrefix(resource, mnt.MountToPath[1:]) { - return mnt.Export(resource) - } + if err = container.setupWorkingDirectory(); err != nil { + return err + } + + env := container.createDaemonEnvironment(linkedEnv) + + if err = populateCommand(container, env); err != nil { + return err } - // Check if this is a special one (resolv.conf, hostname, ..) 
- if resource == "etc/resolv.conf" { - basePath = container.ResolvConfPath + return container.waitForRestore(opts) +} + +func (container *Container) Copy(resource string) (io.ReadCloser, error) { + container.Lock() + defer container.Unlock() + var err error + if err := container.Mount(); err != nil { + return nil, err } - if resource == "etc/hostname" { - basePath = container.HostnamePath + defer func() { + if err != nil { + container.Unmount() + } + }() + + if err = container.mountVolumes(); err != nil { + container.unmountVolumes() + return nil, err } - if resource == "etc/hosts" { - basePath = container.HostsPath + defer func() { + if err != nil { + container.unmountVolumes() + } + }() + + basePath, err := container.GetResourcePath(resource) + if err != nil { + return nil, err } stat, err := os.Stat(basePath) if err != nil { - container.Unmount() return nil, err } var filter []string @@ -1003,11 +1081,12 @@ func (container *Container) Copy(resource string) (io.ReadCloser, error) { IncludeFiles: filter, }) if err != nil { - container.Unmount() return nil, err } + return ioutils.NewReadCloserWrapper(archive, func() error { err := archive.Close() + container.unmountVolumes() container.Unmount() return err }), @@ -1020,14 +1099,6 @@ func (container *Container) Exposes(p nat.Port) bool { return exists } -func (container *Container) GetPtyMaster() (libcontainer.Console, error) { - ttyConsole, ok := container.command.ProcessConfig.Terminal.(execdriver.TtyTerminal) - if !ok { - return nil, ErrNoTTY - } - return ttyConsole.Master(), nil -} - func (container *Container) HostConfig() *runconfig.HostConfig { container.Lock() res := container.hostConfig @@ -1063,7 +1134,7 @@ func (container *Container) setupContainerDns() error { updatedResolvConf, modified := resolvconf.FilterResolvDns(latestResolvConf, container.daemon.config.Bridge.EnableIPv6) if modified { // changes have occurred during resolv.conf localhost cleanup: generate an updated hash - newHash, err := 
utils.HashData(bytes.NewReader(updatedResolvConf)) + newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf)) if err != nil { return err } @@ -1088,7 +1159,7 @@ func (container *Container) setupContainerDns() error { if err != nil { return err } - container.ResolvConfPath, err = container.getRootResourcePath("resolv.conf") + container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") if err != nil { return err } @@ -1118,7 +1189,7 @@ func (container *Container) setupContainerDns() error { } //get a sha256 hash of the resolv conf at this point so we can check //for changes when the host resolv.conf changes (e.g. network update) - resolvHash, err := utils.HashData(bytes.NewReader(resolvConf)) + resolvHash, err := ioutils.HashData(bytes.NewReader(resolvConf)) if err != nil { return err } @@ -1150,7 +1221,7 @@ func (container *Container) updateResolvConf(updatedResolvConf []byte, newResolv if err != nil { return err } - curHash, err := utils.HashData(bytes.NewReader(resolvBytes)) + curHash, err := ioutils.HashData(bytes.NewReader(resolvBytes)) if err != nil { return err } @@ -1249,7 +1320,7 @@ func (container *Container) initializeNetworking() error { return err } - hostsPath, err := container.getRootResourcePath("hosts") + hostsPath, err := container.GetRootResourcePath("hosts") if err != nil { return err } @@ -1282,13 +1353,13 @@ func (container *Container) initializeNetworking() error { // Make sure the config is compatible with the current kernel func (container *Container) verifyDaemonSettings() { - if container.Config.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { + if container.hostConfig.Memory > 0 && !container.daemon.sysInfo.MemoryLimit { logrus.Warnf("Your kernel does not support memory limit capabilities. 
Limitation discarded.") - container.Config.Memory = 0 + container.hostConfig.Memory = 0 } - if container.Config.Memory > 0 && !container.daemon.sysInfo.SwapLimit { + if container.hostConfig.Memory > 0 && container.hostConfig.MemorySwap != -1 && !container.daemon.sysInfo.SwapLimit { logrus.Warnf("Your kernel does not support swap limit capabilities. Limitation discarded.") - container.Config.MemorySwap = -1 + container.hostConfig.MemorySwap = -1 } if container.daemon.sysInfo.IPv4ForwardingDisabled { logrus.Warnf("IPv4 forwarding is disabled. Networking will not work") @@ -1328,7 +1399,7 @@ func (container *Container) setupLinkedContainers() ([]string, error) { linkAlias, child.Config.Env, child.Config.ExposedPorts, - daemon.eng) + ) if err != nil { rollback() @@ -1380,7 +1451,7 @@ func (container *Container) setupWorkingDirectory() error { if container.Config.WorkingDir != "" { container.Config.WorkingDir = path.Clean(container.Config.WorkingDir) - pth, err := container.getResourcePath(container.Config.WorkingDir) + pth, err := container.GetResourcePath(container.Config.WorkingDir) if err != nil { return err } @@ -1414,6 +1485,7 @@ func (container *Container) startLogging() error { if err != nil { return err } + container.LogPath = pth dl, err := jsonfilelog.New(pth) if err != nil { @@ -1426,6 +1498,12 @@ func (container *Container) startLogging() error { return err } l = dl + case "journald": + dl, err := journald.New(container.ID[:12]) + if err != nil { + return err + } + l = dl case "none": return nil default: @@ -1454,6 +1532,37 @@ func (container *Container) waitForStart() error { return err } + // FIXME? We should write to the disk after actually starting up + // becase StdFds cannot be initialized before + container.toDisk() + + return nil +} + +// Like waitForStart() but for restoring a container. +// +// XXX Does RestartPolicy apply here? 
+func (container *Container) waitForRestore(opts *libcontainer.CriuOpts) error { + container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) + + // After calling promise.Go() we'll have two goroutines: + // - The current goroutine that will block in the select + // below until restore is done. + // - A new goroutine that will restore the container and + // wait for it to exit. + select { + case <-container.monitor.restoreSignal: + if container.ExitCode != 0 { + return fmt.Errorf("restore process failed") + } + case err := <-promise.Go(func() error { return container.monitor.Restore(opts) }): + return err + } + + // FIXME? We should write to the disk after actually starting up + // becase StdFds cannot be initialized before + container.toDisk() + return nil } @@ -1464,7 +1573,8 @@ func (container *Container) allocatePort(eng *engine.Engine, port nat.Port, bind } for i := 0; i < len(binding); i++ { - b, err := bridge.AllocatePort(container.ID, port, binding[i]) + b, err := bridge.AllocatePort(container.ID, port, binding[i], container.IsCheckpointed()) + if err != nil { return err } @@ -1513,6 +1623,9 @@ func (container *Container) getNetworkedContainer() (*Container, error) { if err != nil { return nil, err } + if container == nc { + return nil, fmt.Errorf("cannot join own network") + } if !nc.IsRunning() { return nil, fmt.Errorf("cannot join network of a non running container: %s", parts[1]) } diff --git a/daemon/create.go b/daemon/create.go index c820201e42c79..db60355071866 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -2,9 +2,7 @@ package daemon import ( "fmt" - "strings" - "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/pkg/parsers" @@ -12,36 +10,10 @@ import ( "github.com/docker/libcontainer/label" ) -func (daemon *Daemon) ContainerCreate(job *engine.Job) error { - var name string - if len(job.Args) == 1 { - name = job.Args[0] - } else if 
len(job.Args) > 1 { - return fmt.Errorf("Usage: %s", job.Name) - } - - config := runconfig.ContainerConfigFromJob(job) - hostConfig := runconfig.ContainerHostConfigFromJob(job) - - if len(hostConfig.LxcConf) > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") { - return fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name()) - } - if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 { - return fmt.Errorf("Minimum memory limit allowed is 4MB") - } - if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit { - job.Errorf("Your kernel does not support memory limit capabilities. Limitation discarded.\n") - hostConfig.Memory = 0 - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit { - job.Errorf("Your kernel does not support swap limit capabilities. Limitation discarded.\n") - hostConfig.MemorySwap = -1 - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { - return fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.\n") - } - if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 { - return fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.\n") +func (daemon *Daemon) ContainerCreate(name string, config *runconfig.Config, hostConfig *runconfig.HostConfig) (string, []string, error) { + warnings, err := daemon.verifyHostConfig(hostConfig) + if err != nil { + return "", warnings, err } container, buildWarnings, err := daemon.Create(config, hostConfig, name) @@ -51,22 +23,15 @@ func (daemon *Daemon) ContainerCreate(job *engine.Job) error { if tag == "" { tag = graph.DEFAULTTAG } - return fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag) + return "", warnings, fmt.Errorf("No such image: %s (tag: %s)", config.Image, tag) } - return err - } - if !container.Config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { - 
job.Errorf("IPv4 forwarding is disabled.\n") + return "", warnings, err } - container.LogEvent("create") - - job.Printf("%s\n", container.ID) - for _, warning := range buildWarnings { - job.Errorf("%s\n", warning) - } + container.LogEvent("create") + warnings = append(warnings, buildWarnings...) - return nil + return container.ID, warnings, nil } // Create creates a new container from the given configuration with a given name. @@ -93,6 +58,9 @@ func (daemon *Daemon) Create(config *runconfig.Config, hostConfig *runconfig.Hos if warnings, err = daemon.mergeAndVerifyConfig(config, img); err != nil { return nil, nil, err } + if !config.NetworkDisabled && daemon.SystemConfig().IPv4ForwardingDisabled { + warnings = append(warnings, "IPv4 forwarding is disabled.\n") + } if hostConfig == nil { hostConfig = &runconfig.HostConfig{} } diff --git a/daemon/daemon.go b/daemon/daemon.go index 36d05cd92c12e..7df57085cb84c 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -32,6 +32,7 @@ import ( "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/broadcastwriter" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/graphdb" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/namesgenerator" @@ -47,6 +48,7 @@ import ( "github.com/docker/docker/trust" "github.com/docker/docker/utils" "github.com/docker/docker/volumes" + "github.com/docker/libcontainer" "github.com/go-fsnotify/fsnotify" ) @@ -107,7 +109,6 @@ type Daemon struct { containerGraph *graphdb.Database driver graphdriver.Driver execDriver execdriver.Driver - trustStore *trust.TrustStore statsCollector *statsCollector defaultLogConfig runconfig.LogConfig RegistryService *registry.Service @@ -116,36 +117,6 @@ type Daemon struct { // Install installs daemon capabilities to eng. 
func (daemon *Daemon) Install(eng *engine.Engine) error { - for name, method := range map[string]engine.Handler{ - "commit": daemon.ContainerCommit, - "container_inspect": daemon.ContainerInspect, - "container_stats": daemon.ContainerStats, - "create": daemon.ContainerCreate, - "export": daemon.ContainerExport, - "info": daemon.CmdInfo, - "kill": daemon.ContainerKill, - "logs": daemon.ContainerLogs, - "resize": daemon.ContainerResize, - "restart": daemon.ContainerRestart, - "start": daemon.ContainerStart, - "stop": daemon.ContainerStop, - "top": daemon.ContainerTop, - "wait": daemon.ContainerWait, - "execCreate": daemon.ContainerExecCreate, - "execStart": daemon.ContainerExecStart, - "execResize": daemon.ContainerExecResize, - "execInspect": daemon.ContainerExecInspect, - } { - if err := eng.Register(name, method); err != nil { - return err - } - } - if err := daemon.Repositories().Install(eng); err != nil { - return err - } - if err := daemon.trustStore.Install(eng); err != nil { - return err - } // FIXME: this hack is necessary for legacy integration tests to access // the daemon object. 
eng.HackSetGlobalVar("httpapi.daemon", daemon) @@ -208,8 +179,6 @@ func (daemon *Daemon) load(id string) (*Container, error) { return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) } - container.readHostConfig() - return container, nil } @@ -257,7 +226,6 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err if container.IsRunning() { logrus.Debugf("killing old running container %s", container.ID) - existingPid := container.Pid container.SetStopped(&execdriver.ExitStatus{ExitCode: 0}) // We only have to handle this for lxc because the other drivers will ensure that @@ -269,11 +237,6 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err cmd := &execdriver.Command{ ID: container.ID, } - var err error - cmd.ProcessConfig.Process, err = os.FindProcess(existingPid) - if err != nil { - logrus.Debugf("cannot find existing process for %d", existingPid) - } daemon.execDriver.Terminate(cmd) } @@ -283,19 +246,8 @@ func (daemon *Daemon) register(container *Container, updateSuffixarray bool) err if err := container.ToDisk(); err != nil { logrus.Debugf("saving stopped state to disk %s", err) } - - info := daemon.execDriver.Info(container.ID) - if !info.IsRunning() { - logrus.Debugf("Container %s was supposed to be running but is not.", container.ID) - - logrus.Debug("Marking as stopped") - - container.SetStopped(&execdriver.ExitStatus{ExitCode: -127}) - if err := container.ToDisk(); err != nil { - return err - } - } } + return nil } @@ -345,6 +297,21 @@ func (daemon *Daemon) restore() error { logrus.Debugf("Loaded container %v", container.ID) containers[container.ID] = container + + // If the container was checkpointed, we need to reserve + // the IP address that it was using. + // + // XXX We should also reserve host ports (if any). 
+ if container.IsCheckpointed() { + logrus.Debugf("\ncontainer %s was checkpointed", container.ID) + err = bridge.ReserveIP(container.ID, container.NetworkSettings.IPAddress) + if err != nil { + logrus.Errorf("Failed to reserve IP %s for container %s", + container.ID, container.NetworkSettings.IPAddress) + } + } else { + logrus.Debugf("IP was not reserved in restore()") + } } else { logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) } @@ -442,7 +409,7 @@ func (daemon *Daemon) setupResolvconfWatcher() error { updatedResolvConf, modified := resolvconf.FilterResolvDns(updatedResolvConf, daemon.config.Bridge.EnableIPv6) if modified { // changes have occurred during localhost cleanup: generate an updated hash - newHash, err := utils.HashData(bytes.NewReader(updatedResolvConf)) + newHash, err := ioutils.HashData(bytes.NewReader(updatedResolvConf)) if err != nil { logrus.Debugf("Error generating hash of new resolv.conf: %v", err) } else { @@ -493,7 +460,7 @@ func (daemon *Daemon) mergeAndVerifyConfig(config *runconfig.Config, img *image. return nil, err } } - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { + if config.Entrypoint.Len() == 0 && config.Cmd.Len() == 0 { return nil, fmt.Errorf("No command specified") } return warnings, nil @@ -585,17 +552,20 @@ func (daemon *Daemon) generateHostname(id string, config *runconfig.Config) { } } -func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint, configCmd []string) (string, []string) { +func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint *runconfig.Entrypoint, configCmd *runconfig.Command) (string, []string) { var ( entrypoint string args []string ) - if len(configEntrypoint) != 0 { - entrypoint = configEntrypoint[0] - args = append(configEntrypoint[1:], configCmd...) + + cmdSlice := configCmd.Slice() + if configEntrypoint.Len() != 0 { + eSlice := configEntrypoint.Slice() + entrypoint = eSlice[0] + args = append(eSlice[1:], cmdSlice...) 
} else { - entrypoint = configCmd[0] - args = configCmd[1:] + entrypoint = cmdSlice[0] + args = cmdSlice[1:] } return entrypoint, args } @@ -841,7 +811,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService if err != nil { return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) } - realTmp, err := utils.ReadSymlinkedDirectory(tmp) + realTmp, err := fileutils.ReadSymlinkedDirectory(tmp) if err != nil { return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) } @@ -852,7 +822,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { realRoot = config.Root } else { - realRoot, err = utils.ReadSymlinkedDirectory(config.Root) + realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root) if err != nil { return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) } @@ -869,7 +839,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService // Load storage driver driver, err := graphdriver.New(config.Root, config.GraphOptions) if err != nil { - return nil, fmt.Errorf("error intializing graphdriver: %v", err) + return nil, fmt.Errorf("error initializing graphdriver: %v", err) } logrus.Debugf("Using graph driver %s", driver) // register cleanup for graph driver @@ -925,25 +895,32 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService return nil, err } - eventsService := events.New() - logrus.Debug("Creating repository list") - repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), g, trustKey, registryService, eventsService) - if err != nil { - return nil, fmt.Errorf("Couldn't create Tag store: %s", err) - } - trustDir := path.Join(config.Root, "trust") if err := os.MkdirAll(trustDir, 0700); err != nil && !os.IsExist(err) { return nil, err } - t, err := 
trust.NewTrustStore(trustDir) + trustService, err := trust.NewTrustStore(trustDir) if err != nil { return nil, fmt.Errorf("could not create trust store: %s", err) } + eventsService := events.New() + logrus.Debug("Creating repository list") + tagCfg := &graph.TagStoreConfig{ + Graph: g, + Key: trustKey, + Registry: registryService, + Events: eventsService, + Trust: trustService, + } + repositories, err := graph.NewTagStore(path.Join(config.Root, "repositories-"+driver.String()), tagCfg) + if err != nil { + return nil, fmt.Errorf("Couldn't create Tag store: %s", err) + } + if !config.DisableNetwork { if err := bridge.InitDriver(&config.Bridge); err != nil { - return nil, err + return nil, fmt.Errorf("Error initializing Bridge: %v", err) } } @@ -962,7 +939,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService localCopy := path.Join(config.Root, "init", fmt.Sprintf("dockerinit-%s", dockerversion.VERSION)) sysInitPath := utils.DockerInitPath(localCopy) if sysInitPath == "" { - return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. See http://docs.docker.com/contributing/devenvironment for official build instructions.") + return nil, fmt.Errorf("Could not locate dockerinit: This usually means docker was built incorrectly. 
See https://docs.docker.com/contributing/devenvironment for official build instructions.") } if sysInitPath != localCopy { @@ -970,7 +947,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService if err := os.Mkdir(path.Dir(localCopy), 0700); err != nil && !os.IsExist(err) { return nil, err } - if _, err := utils.CopyFile(sysInitPath, localCopy); err != nil { + if _, err := fileutils.CopyFile(sysInitPath, localCopy); err != nil { return nil, err } if err := os.Chmod(localCopy, 0700); err != nil { @@ -981,7 +958,7 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService sysInfo := sysinfo.New(false) const runDir = "/var/run/docker" - ed, err := execdrivers.NewDriver(config.ExecDriver, runDir, config.Root, sysInitPath, sysInfo) + ed, err := execdrivers.NewDriver(config.ExecDriver, config.ExecOptions, runDir, config.Root, sysInitPath, sysInfo) if err != nil { return nil, err } @@ -1002,7 +979,6 @@ func NewDaemonFromDirectory(config *Config, eng *engine.Engine, registryService sysInitPath: sysInitPath, execDriver: ed, eng: eng, - trustStore: t, statsCollector: newStatsCollector(1 * time.Second), defaultLogConfig: config.LogConfig, RegistryService: registryService, @@ -1101,6 +1077,25 @@ func (daemon *Daemon) Unpause(c *Container) error { return nil } +func (daemon *Daemon) Checkpoint(c *Container, opts *libcontainer.CriuOpts) error { + if err := daemon.execDriver.Checkpoint(c.command, opts); err != nil { + return err + } + c.SetCheckpointed(opts.LeaveRunning) + return nil +} + +func (daemon *Daemon) Restore(c *Container, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts) (execdriver.ExitStatus, error) { + // Mount the container's filesystem (daemon/graphdriver/aufs/aufs.go). 
+ _, err := daemon.driver.Get(c.ID, c.GetMountLabel()) + if err != nil { + return execdriver.ExitStatus{ExitCode: 0}, err + } + + exitCode, err := daemon.execDriver.Restore(c.command, pipes, restoreCallback, opts) + return exitCode, err +} + func (daemon *Daemon) Kill(c *Container, sig int) error { return daemon.execDriver.Kill(c.command, sig) } @@ -1221,8 +1216,7 @@ func tempDir(rootDir string) (string, error) { if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { tmpDir = filepath.Join(rootDir, "tmp") } - err := os.MkdirAll(tmpDir, 0700) - return tmpDir, err + return tmpDir, os.MkdirAll(tmpDir, 0700) } func checkKernel() error { @@ -1232,7 +1226,7 @@ func checkKernel() error { // Unfortunately we can't test for the feature "does not cause a kernel panic" // without actually causing a kernel panic, so we need this workaround until // the circumstances of pre-3.8 crashes are clearer. - // For details see http://github.com/docker/docker/issues/407 + // For details see https://github.com/docker/docker/issues/407 if k, err := kernel.GetKernelVersion(); err != nil { logrus.Warnf("%s", err) } else { @@ -1244,3 +1238,56 @@ func checkKernel() error { } return nil } + +func (daemon *Daemon) verifyHostConfig(hostConfig *runconfig.HostConfig) ([]string, error) { + var warnings []string + + if hostConfig == nil { + return warnings, nil + } + + if hostConfig.LxcConf.Len() > 0 && !strings.Contains(daemon.ExecutionDriver().Name(), "lxc") { + return warnings, fmt.Errorf("Cannot use --lxc-conf with execdriver: %s", daemon.ExecutionDriver().Name()) + } + if hostConfig.Memory != 0 && hostConfig.Memory < 4194304 { + return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") + } + if hostConfig.Memory > 0 && !daemon.SystemConfig().MemoryLimit { + warnings = append(warnings, "Your kernel does not support memory limit capabilities. 
Limitation discarded.") + hostConfig.Memory = 0 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !daemon.SystemConfig().SwapLimit { + warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") + hostConfig.MemorySwap = -1 + } + if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory { + return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.") + } + if hostConfig.Memory == 0 && hostConfig.MemorySwap > 0 { + return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage.") + } + if hostConfig.CpuQuota > 0 && !daemon.SystemConfig().CpuCfsQuota { + warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") + hostConfig.CpuQuota = 0 + } + + return warnings, nil +} + +func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { + container.Lock() + defer container.Unlock() + if err := parseSecurityOpt(container, hostConfig); err != nil { + return err + } + + // Register any links from the host config before starting the container + if err := daemon.RegisterLinks(container, hostConfig); err != nil { + return err + } + + container.hostConfig = hostConfig + container.toDisk() + + return nil +} diff --git a/daemon/delete.go b/daemon/delete.go index d398741d75678..464193b283ab1 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -129,6 +129,7 @@ func (daemon *Daemon) commonRm(container *Container, forceRemove bool) (err erro if err != nil && forceRemove { daemon.idIndex.Delete(container.ID) daemon.containers.Delete(container.ID) + os.RemoveAll(container.root) } }() diff --git a/daemon/exec.go b/daemon/exec.go index f91600da7aa48..9aa102690f4ac 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -10,7 +10,6 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" 
"github.com/docker/docker/daemon/execdriver/lxc" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/broadcastwriter" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/promise" @@ -111,33 +110,26 @@ func (d *Daemon) getActiveContainer(name string) (*Container, error) { return container, nil } -func (d *Daemon) ContainerExecCreate(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s [options] container command [args]", job.Name) - } +func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) { if strings.HasPrefix(d.execDriver.Name(), lxc.DriverName) { - return lxc.ErrExec - } - - var name = job.Args[0] - - container, err := d.getActiveContainer(name) - if err != nil { - return err + return "", lxc.ErrExec } - config, err := runconfig.ExecConfigFromJob(job) + container, err := d.getActiveContainer(config.Container) if err != nil { - return err + return "", err } - entrypoint, args := d.getEntrypointAndArgs(nil, config.Cmd) + cmd := runconfig.NewCommand(config.Cmd...) 
+ entrypoint, args := d.getEntrypointAndArgs(runconfig.NewEntrypoint(), cmd) processConfig := execdriver.ProcessConfig{ Tty: config.Tty, Entrypoint: entrypoint, Arguments: args, + User: config.User, + Privileged: config.Privileged, } execConfig := &execConfig{ @@ -155,20 +147,15 @@ func (d *Daemon) ContainerExecCreate(job *engine.Job) error { d.registerExecCommand(execConfig) - job.Printf("%s\n", execConfig.ID) + return execConfig.ID, nil - return nil } -func (d *Daemon) ContainerExecStart(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s [options] exec", job.Name) - } +func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error { var ( cStdin io.ReadCloser cStdout, cStderr io.Writer - execName = job.Args[0] ) execConfig, err := d.getExecConfig(execName) @@ -198,15 +185,15 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) error { go func() { defer w.Close() defer logrus.Debugf("Closing buffered stdin pipe") - io.Copy(w, job.Stdin) + io.Copy(w, stdin) }() cStdin = r } if execConfig.OpenStdout { - cStdout = job.Stdout + cStdout = stdout } if execConfig.OpenStderr { - cStderr = job.Stderr + cStderr = stderr } execConfig.StreamConfig.stderr = broadcastwriter.New() @@ -227,8 +214,7 @@ func (d *Daemon) ContainerExecStart(job *engine.Job) error { // the exitStatus) even after the cmd is done running. 
go func() { - err := container.Exec(execConfig) - if err != nil { + if err := container.Exec(execConfig); err != nil { execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err) } }() diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go index 637f7d779e441..ccbf8eb76ace5 100644 --- a/daemon/execdriver/driver.go +++ b/daemon/execdriver/driver.go @@ -22,6 +22,7 @@ import ( // Context is a generic key value pair that allows // arbatrary data to be sent type Context map[string]string +type RestoreCallback func(*ProcessConfig, int) var ( ErrNotRunning = errors.New("Container is not running") @@ -66,6 +67,8 @@ type Driver interface { Kill(c *Command, sig int) error Pause(c *Command) error Unpause(c *Command) error + Checkpoint(c *Command, opts *libcontainer.CriuOpts) error + Restore(c *Command, pipes *Pipes, restoreCallback RestoreCallback, opts *libcontainer.CriuOpts) (ExitStatus, error) Name() string // Driver name Info(id string) Info // "temporary" hack (until we move state from core to plugins) GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container. 
@@ -110,6 +113,8 @@ type Resources struct { MemorySwap int64 `json:"memory_swap"` CpuShares int64 `json:"cpu_shares"` CpusetCpus string `json:"cpuset_cpus"` + CpusetMems string `json:"cpuset_mems"` + CpuQuota int64 `json:"cpu_quota"` Rlimits []*ulimit.Rlimit `json:"rlimits"` } @@ -204,6 +209,8 @@ func SetupCgroups(container *configs.Config, c *Command) error { container.Cgroups.MemoryReservation = c.Resources.Memory container.Cgroups.MemorySwap = c.Resources.MemorySwap container.Cgroups.CpusetCpus = c.Resources.CpusetCpus + container.Cgroups.CpusetMems = c.Resources.CpusetMems + container.Cgroups.CpuQuota = c.Resources.CpuQuota } return nil diff --git a/daemon/execdriver/execdrivers/execdrivers.go b/daemon/execdriver/execdrivers/execdrivers.go index f6f97c930266d..dde0be1f0f463 100644 --- a/daemon/execdriver/execdrivers/execdrivers.go +++ b/daemon/execdriver/execdrivers/execdrivers.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/pkg/sysinfo" ) -func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { +func NewDriver(name string, options []string, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) (execdriver.Driver, error) { switch name { case "lxc": // we want to give the lxc driver the full docker root because it needs @@ -18,7 +18,7 @@ func NewDriver(name, root, libPath, initPath string, sysInfo *sysinfo.SysInfo) ( // to be backwards compatible return lxc.NewDriver(root, libPath, initPath, sysInfo.AppArmor) case "native": - return native.NewDriver(path.Join(root, "execdriver", "native"), initPath) + return native.NewDriver(path.Join(root, "execdriver", "native"), initPath, options) } return nil, fmt.Errorf("unknown exec driver %s", name) } diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go index 97b34bb678586..7dd2cd1ccaed5 100644 --- a/daemon/execdriver/lxc/driver.go +++ b/daemon/execdriver/lxc/driver.go @@ -18,10 +18,10 @@ import ( "github.com/Sirupsen/logrus" 
"github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/stringutils" sysinfo "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term" "github.com/docker/docker/pkg/version" - "github.com/docker/docker/utils" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" "github.com/docker/libcontainer/configs" @@ -85,16 +85,21 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba dataPath = d.containerDir(c.ID) ) + container, err := d.createContainer(c) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + if c.ProcessConfig.Tty { term, err = NewTtyConsole(&c.ProcessConfig, pipes) } else { term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes) } - c.ProcessConfig.Terminal = term - container, err := d.createContainer(c) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } + c.ProcessConfig.Terminal = term + d.Lock() d.activeContainers[c.ID] = &activeContainer{ container: container, @@ -187,7 +192,7 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba // without exec in go we have to do this horrible shell hack... 
shellString := "mount --make-rslave /; exec " + - utils.ShellQuoteArguments(params) + stringutils.ShellQuoteArguments(params) params = []string{ "unshare", "-m", "--", "/bin/sh", "-c", shellString, @@ -488,6 +493,14 @@ func (d *driver) Unpause(c *execdriver.Command) error { return err } +func (d *driver) Checkpoint(c *execdriver.Command, opts *libcontainer.CriuOpts) error { + return fmt.Errorf("Checkpointing lxc containers not supported yet\n") +} + +func (d *driver) Restore(c *execdriver.Command, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts) (execdriver.ExitStatus, error) { + return execdriver.ExitStatus{ExitCode: 0}, fmt.Errorf("Restoring lxc containers not supported yet\n") +} + func (d *driver) Terminate(c *execdriver.Command) error { return KillLxc(c.ID, 9) } diff --git a/daemon/execdriver/lxc/init.go b/daemon/execdriver/lxc/init.go index e99502667d7b9..eca1c02e21d68 100644 --- a/daemon/execdriver/lxc/init.go +++ b/daemon/execdriver/lxc/init.go @@ -4,7 +4,6 @@ import ( "encoding/json" "flag" "fmt" - "io/ioutil" "log" "os" "os/exec" @@ -107,12 +106,13 @@ func getArgs() *InitArgs { func setupEnv(args *InitArgs) error { // Get env var env []string - content, err := ioutil.ReadFile(".dockerenv") + dockerenv, err := os.Open(".dockerenv") if err != nil { return fmt.Errorf("Unable to load environment variables: %v", err) } - if err := json.Unmarshal(content, &env); err != nil { - return fmt.Errorf("Unable to unmarshal environment variables: %v", err) + defer dockerenv.Close() + if err := json.NewDecoder(dockerenv).Decode(&env); err != nil { + return fmt.Errorf("Unable to decode environment variables: %v", err) } // Propagate the plugin-specific container env variable env = append(env, "container="+os.Getenv("container")) @@ -141,13 +141,3 @@ func setupWorkingDirectory(args *InitArgs) error { } return nil } - -func getEnv(args *InitArgs, key string) string { - for _, kv := range args.Env { - parts := 
strings.SplitN(kv, "=", 2) - if parts[0] == key && len(parts) == 2 { - return parts[1] - } - } - return "" -} diff --git a/daemon/execdriver/lxc/lxc_template.go b/daemon/execdriver/lxc/lxc_template.go index 02313d465ad59..b3be7f8c51884 100644 --- a/daemon/execdriver/lxc/lxc_template.go +++ b/daemon/execdriver/lxc/lxc_template.go @@ -9,7 +9,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" nativeTemplate "github.com/docker/docker/daemon/execdriver/native/template" - "github.com/docker/docker/utils" + "github.com/docker/docker/pkg/stringutils" "github.com/docker/libcontainer/label" ) @@ -62,7 +62,7 @@ lxc.pivotdir = lxc_putold # NOTICE: These mounts must be applied within the namespace {{if .ProcessConfig.Privileged}} # WARNING: mounting procfs and/or sysfs read-write is a known attack vector. -# See e.g. http://blog.zx2c4.com/749 and http://bit.ly/T9CkqJ +# See e.g. http://blog.zx2c4.com/749 and https://bit.ly/T9CkqJ # We mount them read-write here, but later, dockerinit will call the Restrict() function to remount them read-only. # We cannot mount them directly read-only, because that would prevent loading AppArmor profiles. 
lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noexec 0 0 @@ -110,6 +110,12 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}} {{if .Resources.CpusetCpus}} lxc.cgroup.cpuset.cpus = {{.Resources.CpusetCpus}} {{end}} +{{if .Resources.CpusetMems}} +lxc.cgroup.cpuset.mems = {{.Resources.CpusetMems}} +{{end}} +{{if .Resources.CpuQuota}} +lxc.cgroup.cpu.cfs_quota_us = {{.Resources.CpuQuota}} +{{end}} {{end}} {{if .LxcConfig}} @@ -177,7 +183,7 @@ func keepCapabilities(adds []string, drops []string) ([]string, error) { } func dropList(drops []string) ([]string, error) { - if utils.StringsContainsNoCase(drops, "all") { + if stringutils.InSlice(drops, "all") { var newCaps []string for _, capName := range execdriver.GetAllCapabilities() { cap := execdriver.GetCapability(capName) diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go index e5811bb852a37..727d142c187d2 100644 --- a/daemon/execdriver/native/driver.go +++ b/daemon/execdriver/native/driver.go @@ -15,6 +15,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" + "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/reexec" sysinfo "github.com/docker/docker/pkg/system" "github.com/docker/docker/pkg/term" @@ -40,7 +41,19 @@ type driver struct { sync.Mutex } -func NewDriver(root, initPath string) (*driver, error) { +// FIXME: move this into libcontainer +// InitFactory returns an options func to configure a LinuxFactory with the +// provided absolute path to the init binary and arguements and a path to criu +func InitFactory(criuPath string, path string, args ...string) func(*libcontainer.LinuxFactory) error { + return func(l *libcontainer.LinuxFactory) error { + l.CriuPath = criuPath + l.InitPath = path + l.InitArgs = args + return nil + } +} + +func NewDriver(root, initPath string, options []string) (*driver, error) { meminfo, err := sysinfo.ReadMemInfo() if err != nil { return nil, err @@ -53,16 +66,51 @@ 
func NewDriver(root, initPath string) (*driver, error) { if err := apparmor.InstallDefaultProfile(); err != nil { return nil, err } + + // choose cgroup manager + // this makes sure there are no breaking changes to people + // who upgrade from versions without native.cgroupdriver opt cgm := libcontainer.Cgroupfs if systemd.UseSystemd() { cgm = libcontainer.SystemdCgroups } + // parse the options + for _, option := range options { + key, val, err := parsers.ParseKeyValueOpt(option) + if err != nil { + return nil, err + } + key = strings.ToLower(key) + switch key { + case "native.cgroupdriver": + // override the default if they set options + switch val { + case "systemd": + if systemd.UseSystemd() { + cgm = libcontainer.SystemdCgroups + } else { + // warn them that they chose the wrong driver + logrus.Warn("You cannot use systemd as native.cgroupdriver, using cgroupfs instead") + } + case "cgroupfs": + cgm = libcontainer.Cgroupfs + default: + return nil, fmt.Errorf("Unknown native.cgroupdriver given %q. 
try cgroupfs or systemd", val) + } + default: + return nil, fmt.Errorf("Unknown option %s\n", key) + } + } + + logrus.Debugf("Using %v as native.cgroupdriver", cgm) + f, err := libcontainer.New( root, cgm, - libcontainer.InitPath(reexec.Self(), DriverName), + InitFactory("criu", reexec.Self(), DriverName), ) + if err != nil { return nil, err } @@ -88,8 +136,6 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba return execdriver.ExitStatus{ExitCode: -1}, err } - var term execdriver.Terminal - p := &libcontainer.Process{ Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...), Env: c.ProcessConfig.Env, @@ -97,36 +143,9 @@ func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallba User: c.ProcessConfig.User, } - if c.ProcessConfig.Tty { - rootuid, err := container.HostUID() - if err != nil { - return execdriver.ExitStatus{ExitCode: -1}, err - } - cons, err := p.NewConsole(rootuid) - if err != nil { - return execdriver.ExitStatus{ExitCode: -1}, err - } - term, err = NewTtyConsole(cons, pipes, rootuid) - } else { - p.Stdout = pipes.Stdout - p.Stderr = pipes.Stderr - r, w, err := os.Pipe() - if err != nil { - return execdriver.ExitStatus{ExitCode: -1}, err - } - if pipes.Stdin != nil { - go func() { - io.Copy(w, pipes.Stdin) - w.Close() - }() - p.Stdin = r - } - term = &execdriver.StdConsole{} - } - if err != nil { + if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } - c.ProcessConfig.Terminal = term cont, err := d.factory.Create(c.ID, container) if err != nil { @@ -188,6 +207,34 @@ func notifyOnOOM(container libcontainer.Container) <-chan struct{} { return oom } +func killCgroupProcs(c libcontainer.Container) { + var procs []*os.Process + if err := c.Pause(); err != nil { + logrus.Warn(err) + } + pids, err := c.Processes() + if err != nil { + // don't care about childs if we can't get them, this is mostly because cgroup 
already deleted + logrus.Warnf("Failed to get processes from container %s: %v", c.ID(), err) + } + for _, pid := range pids { + if p, err := os.FindProcess(pid); err == nil { + procs = append(procs, p) + if err := p.Kill(); err != nil { + logrus.Warn(err) + } + } + } + if err := c.Resume(); err != nil { + logrus.Warn(err) + } + for _, p := range procs { + if _, err := p.Wait(); err != nil { + logrus.Warn(err) + } + } +} + func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) { return func() (*os.ProcessState, error) { pid, err := p.Pid() @@ -195,8 +242,6 @@ func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*o return nil, err } - processes, err := c.Processes() - process, err := os.FindProcess(pid) s, err := process.Wait() if err != nil { @@ -206,19 +251,7 @@ func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*o } s = execErr.ProcessState } - if err != nil { - return s, err - } - - for _, pid := range processes { - process, err := os.FindProcess(pid) - if err != nil { - logrus.Errorf("Failed to kill process: %d", pid) - continue - } - process.Kill() - } - + killCgroupProcs(c) p.Wait() return s, err } @@ -252,31 +285,148 @@ func (d *driver) Unpause(c *execdriver.Command) error { return active.Resume() } -func (d *driver) Terminate(c *execdriver.Command) error { - defer d.cleanContainer(c.ID) - // lets check the start time for the process +func (d *driver) Checkpoint(c *execdriver.Command, opts *libcontainer.CriuOpts) error { active := d.activeContainers[c.ID] if active == nil { return fmt.Errorf("active container for %s does not exist", c.ID) } - state, err := active.State() + + d.Lock() + defer d.Unlock() + err := active.Checkpoint(opts) if err != nil { return err } - pid := state.InitProcessPid + return nil +} + +func (d *driver) Restore(c *execdriver.Command, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts) 
(execdriver.ExitStatus, error) { + cont, err := d.factory.Load(c.ID) + + p := &libcontainer.Process{ + Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...), + Env: c.ProcessConfig.Env, + Cwd: c.WorkingDir, + User: c.ProcessConfig.User, + } + + var term execdriver.Terminal + + if c.ProcessConfig.Tty { + rootuid, err := cont.Config().HostUID() + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + cons, err := p.NewConsole(rootuid) + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + term, err = NewTtyConsole(cons, pipes, rootuid) + } else { + p.Stdout = pipes.Stdout + p.Stderr = pipes.Stderr + r, w, err := os.Pipe() + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + if pipes.Stdin != nil { + go func() { + io.Copy(w, pipes.Stdin) + w.Close() + }() + p.Stdin = r + } + term = &execdriver.StdConsole{} + } + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + c.ProcessConfig.Terminal = term + + + if err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + d.Lock() + d.activeContainers[c.ID] = cont + d.Unlock() + defer func() { + cont.Destroy() + d.cleanContainer(c.ID) + }() + + + // Since the CRIU binary exits after restoring the container, we + // need to reap its child by setting PR_SET_CHILD_SUBREAPER (36) + // so that it'll be owned by this process (Docker daemon) after restore. + // + // XXX This really belongs to where the Docker daemon starts. + if _, _, syserr := syscall.RawSyscall(syscall.SYS_PRCTL, 36, 1, 0); syserr != 0 { + return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Could not set PR_SET_CHILD_SUBREAPER (syserr %d)", syserr) + } + + if err := cont.Restore(p, opts); err != nil { + return execdriver.ExitStatus{ExitCode: -1}, err + } + + // FIXME: no idea if any of this is needed... 
+ if restoreCallback != nil { + pid, err := p.Pid() + if err != nil { + p.Signal(os.Kill) + p.Wait() + return execdriver.ExitStatus{ExitCode: -1}, err + } + restoreCallback(&c.ProcessConfig, pid) + } + + oom := notifyOnOOM(cont) + waitF := p.Wait + if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) { + // we need such hack for tracking processes with inherited fds, + // because cmd.Wait() waiting for all streams to be copied + waitF = waitInPIDHost(p, cont) + } + ps, err := waitF() + if err != nil { + execErr, ok := err.(*exec.ExitError) + if !ok { + return execdriver.ExitStatus{ExitCode: -1}, err + } + ps = execErr.ProcessState + } + + cont.Destroy() + _, oomKill := <-oom + return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil +} + + + +func (d *driver) Terminate(c *execdriver.Command) error { + defer d.cleanContainer(c.ID) + container, err := d.factory.Load(c.ID) + if err != nil { + return err + } + defer container.Destroy() + state, err := container.State() + if err != nil { + return err + } + pid := state.InitProcessPid currentStartTime, err := system.GetProcessStartTime(pid) if err != nil { return err } - if state.InitProcessStartTime == currentStartTime { err = syscall.Kill(pid, 9) syscall.Wait4(pid, nil, 0, nil) } - return err - } func (d *driver) Info(id string) execdriver.Info { @@ -339,16 +489,6 @@ func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) { }, nil } -func getEnv(key string, env []string) string { - for _, pair := range env { - parts := strings.Split(pair, "=") - if parts[0] == key { - return parts[1] - } - } - return "" -} - type TtyConsole struct { console libcontainer.Console } @@ -399,3 +539,40 @@ func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error { func (t *TtyConsole) Close() error { return t.console.Close() } + +func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes 
*execdriver.Pipes) error { + var term execdriver.Terminal + var err error + + if processConfig.Tty { + rootuid, err := container.HostUID() + if err != nil { + return err + } + cons, err := p.NewConsole(rootuid) + if err != nil { + return err + } + term, err = NewTtyConsole(cons, pipes, rootuid) + } else { + p.Stdout = pipes.Stdout + p.Stderr = pipes.Stderr + r, w, err := os.Pipe() + if err != nil { + return err + } + if pipes.Stdin != nil { + go func() { + io.Copy(w, pipes.Stdin) + w.Close() + }() + p.Stdin = r + } + term = &execdriver.StdConsole{} + } + if err != nil { + return err + } + processConfig.Terminal = term + return nil +} diff --git a/daemon/execdriver/native/exec.go b/daemon/execdriver/native/exec.go index 2edd3313b158b..dd41c0ad1da2b 100644 --- a/daemon/execdriver/native/exec.go +++ b/daemon/execdriver/native/exec.go @@ -14,46 +14,28 @@ import ( "github.com/docker/libcontainer/utils" ) -// TODO(vishh): Add support for running in privileged mode and running as a different user. 
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { active := d.activeContainers[c.ID] if active == nil { return -1, fmt.Errorf("No active container exists with ID %s", c.ID) } - var term execdriver.Terminal - var err error - p := &libcontainer.Process{ Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...), Env: c.ProcessConfig.Env, Cwd: c.WorkingDir, - User: c.ProcessConfig.User, + User: processConfig.User, } - if processConfig.Tty { - config := active.Config() - rootuid, err := config.HostUID() - if err != nil { - return -1, err - } - cons, err := p.NewConsole(rootuid) - if err != nil { - return -1, err - } - term, err = NewTtyConsole(cons, pipes, rootuid) - } else { - p.Stdout = pipes.Stdout - p.Stderr = pipes.Stderr - p.Stdin = pipes.Stdin - term = &execdriver.StdConsole{} + if processConfig.Privileged { + p.Capabilities = execdriver.GetAllCapabilities() } - if err != nil { + + config := active.Config() + if err := setupPipes(&config, processConfig, p, pipes); err != nil { return -1, err } - processConfig.Terminal = term - if err := active.Start(p); err != nil { return -1, err } diff --git a/daemon/execdriver/native/utils.go b/daemon/execdriver/native/utils.go deleted file mode 100644 index a703926453e9d..0000000000000 --- a/daemon/execdriver/native/utils.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux - -package native - -//func findUserArgs() []string { -//for i, a := range os.Args { -//if a == "--" { -//return os.Args[i+1:] -//} -//} -//return []string{} -//} - -//// loadConfigFromFd loads a container's config from the sync pipe that is provided by -//// fd 3 when running a process -//func loadConfigFromFd() (*configs.Config, error) { -//var config *libcontainer.Config -//if err := json.NewDecoder(os.NewFile(3, "child")).Decode(&config); err != nil { -//return nil, err -//} -//return config, nil -//} diff --git 
a/daemon/execdriver/utils.go b/daemon/execdriver/utils.go index e1fc9b9014b53..407c4f4fa17b5 100644 --- a/daemon/execdriver/utils.go +++ b/daemon/execdriver/utils.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/docker/docker/utils" + "github.com/docker/docker/pkg/stringutils" "github.com/syndtr/gocapability/capability" ) @@ -89,17 +89,17 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { if strings.ToLower(cap) == "all" { continue } - if !utils.StringsContainsNoCase(allCaps, cap) { + if !stringutils.InSlice(allCaps, cap) { return nil, fmt.Errorf("Unknown capability drop: %q", cap) } } // handle --cap-add=all - if utils.StringsContainsNoCase(adds, "all") { + if stringutils.InSlice(adds, "all") { basics = allCaps } - if !utils.StringsContainsNoCase(drops, "all") { + if !stringutils.InSlice(drops, "all") { for _, cap := range basics { // skip `all` aready handled above if strings.ToLower(cap) == "all" { @@ -107,7 +107,7 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { } // if we don't drop `all`, add back all the non-dropped caps - if !utils.StringsContainsNoCase(drops, cap) { + if !stringutils.InSlice(drops, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } @@ -119,12 +119,12 @@ func TweakCapabilities(basics, adds, drops []string) ([]string, error) { continue } - if !utils.StringsContainsNoCase(allCaps, cap) { + if !stringutils.InSlice(allCaps, cap) { return nil, fmt.Errorf("Unknown capability to add: %q", cap) } // add cap if not already in the list - if !utils.StringsContainsNoCase(newCaps, cap) { + if !stringutils.InSlice(newCaps, cap) { newCaps = append(newCaps, strings.ToUpper(cap)) } } diff --git a/daemon/export.go b/daemon/export.go index b1417b932cc10..b94b6100cb71c 100644 --- a/daemon/export.go +++ b/daemon/export.go @@ -3,16 +3,9 @@ package daemon import ( "fmt" "io" - - "github.com/docker/docker/engine" ) -func (daemon *Daemon) ContainerExport(job *engine.Job) error { - if len(job.Args) 
!= 1 { - return fmt.Errorf("Usage: %s container_id", job.Name) - } - name := job.Args[0] - +func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { container, err := daemon.Get(name) if err != nil { return err @@ -25,7 +18,7 @@ func (daemon *Daemon) ContainerExport(job *engine.Job) error { defer data.Close() // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(job.Stdout, data); err != nil { + if _, err := io.Copy(out, data); err != nil { return fmt.Errorf("%s: %s", name, err) } // FIXME: factor job-specific LogEvent to engine.Job.Run() diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md index 1dc918016d523..a090b731faf8c 100644 --- a/daemon/graphdriver/devmapper/README.md +++ b/daemon/graphdriver/devmapper/README.md @@ -186,7 +186,7 @@ Here is the list of supported options: can be achieved by zeroing the first 4k to indicate empty metadata, like this: - ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1``` + ``dd if=/dev/zero of=$metadata_dev bs=4096 count=1`` Example use: @@ -216,3 +216,39 @@ Here is the list of supported options: Example use: ``docker -d --storage-opt dm.blkdiscard=false`` + + * `dm.override_udev_sync_check` + + Overrides the `udev` synchronization checks between `devicemapper` and `udev`. + `udev` is the device manager for the Linux kernel. + + To view the `udev` sync support of a Docker daemon that is using the + `devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + + When `udev` sync support is `true`, then `devicemapper` and udev can + coordinate the activation and deactivation of devices for containers. + + When `udev` sync support is `false`, a race condition occurs between + the`devicemapper` and `udev` during create and cleanup. The race condition + results in errors and failures. 
(For information on these failures, see + [docker#4036](https://github.com/docker/docker/issues/4036)) + + To allow the `docker` daemon to start, regardless of `udev` sync not being + supported, set `dm.override_udev_sync_check` to true: + + $ docker -d --storage-opt dm.override_udev_sync_check=true + + When this value is `true`, the `devicemapper` continues and simply warns + you the errors are happening. + + > **Note**: The ideal is to pursue a `docker` daemon and environment that + > does support synchronizing with `udev`. For further discussion on this + > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). + > Otherwise, set this flag for migrating existing Docker daemons to a + > daemon with a supported environment. + diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go index a5ad0e676c73f..42b9d76bedf09 100644 --- a/daemon/graphdriver/devmapper/deviceset.go +++ b/daemon/graphdriver/devmapper/deviceset.go @@ -30,7 +30,8 @@ var ( DefaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 DefaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 DefaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + DefaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors + DefaultUdevSyncOverride bool = false MaxDeviceId int = 0xffffff // 24 bit, pool limit DeviceIdMapSz int = (MaxDeviceId + 1) / 8 // We retry device removal so many a times that even error messages @@ -87,20 +88,21 @@ type DeviceSet struct { deviceIdMap []byte // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - Transaction `json:"-"` + 
dataLoopbackSize int64 + metaDataLoopbackSize int64 + baseFsSize uint64 + filesystem string + mountOptions string + mkfsArgs []string + dataDevice string // block or loop dev + dataLoopFile string // loopback file, if used + metadataDevice string // block or loop dev + metadataLoopFile string // loopback file, if used + doBlkDiscard bool + thinpBlockSize uint32 + thinPoolDevice string + Transaction `json:"-"` + overrideUdevSyncCheck bool } type DiskUsage struct { @@ -216,7 +218,7 @@ func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { } defer file.Close() - if err = file.Truncate(size); err != nil { + if err := file.Truncate(size); err != nil { return "", err } } @@ -695,7 +697,7 @@ func (devices *DeviceSet) setupBaseImage() error { logrus.Debugf("Creating filesystem on base device-mapper thin volume") - if err = devices.activateDeviceIfNeeded(info); err != nil { + if err := devices.activateDeviceIfNeeded(info); err != nil { return err } @@ -704,7 +706,7 @@ func (devices *DeviceSet) setupBaseImage() error { } info.Initialized = true - if err = devices.saveMetadata(info); err != nil { + if err := devices.saveMetadata(info); err != nil { info.Initialized = false return err } @@ -970,9 +972,11 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // https://github.com/docker/docker/issues/4036 if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Warnf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors") + logrus.Errorf("Udev sync is not supported. This will lead to unexpected behavior, data loss and errors. 
For more information, see https://docs.docker.com/reference/commandline/cli/#daemon-storage-driver-option") + if !devices.overrideUdevSyncCheck { + return graphdriver.ErrNotSupported + } } - logrus.Debugf("devicemapper: udev sync support: %v", devicemapper.UdevSyncSupported()) if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { return err @@ -1095,14 +1099,14 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error { // If we didn't just create the data or metadata image, we need to // load the transaction id and migrate old metadata if !createdLoopback { - if err = devices.initMetaData(); err != nil { + if err := devices.initMetaData(); err != nil { return err } } // Right now this loads only NextDeviceId. If there is more metadata // down the line, we might have to move it earlier. - if err = devices.loadDeviceSetMetaData(); err != nil { + if err := devices.loadDeviceSetMetaData(); err != nil { return err } @@ -1524,8 +1528,7 @@ func (devices *DeviceSet) MetadataDevicePath() string { func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { buf := new(syscall.Statfs_t) - err := syscall.Statfs(loopFile, buf) - if err != nil { + if err := syscall.Statfs(loopFile, buf); err != nil { logrus.Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err) return 0, err } @@ -1596,15 +1599,16 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error devicemapper.SetDevDir("/dev") devices := &DeviceSet{ - root: root, - MetaData: MetaData{Devices: make(map[string]*DevInfo)}, - dataLoopbackSize: DefaultDataLoopbackSize, - metaDataLoopbackSize: DefaultMetaDataLoopbackSize, - baseFsSize: DefaultBaseFsSize, - filesystem: "ext4", - doBlkDiscard: true, - thinpBlockSize: DefaultThinpBlockSize, - deviceIdMap: make([]byte, DeviceIdMapSz), + root: root, + MetaData: MetaData{Devices: make(map[string]*DevInfo)}, + dataLoopbackSize: DefaultDataLoopbackSize, + metaDataLoopbackSize: 
DefaultMetaDataLoopbackSize, + baseFsSize: DefaultBaseFsSize, + overrideUdevSyncCheck: DefaultUdevSyncOverride, + filesystem: "ext4", + doBlkDiscard: true, + thinpBlockSize: DefaultThinpBlockSize, + deviceIdMap: make([]byte, DeviceIdMapSz), } foundBlkDiscard := false @@ -1661,6 +1665,11 @@ func NewDeviceSet(root string, doInit bool, options []string) (*DeviceSet, error } // convert to 512b sectors devices.thinpBlockSize = uint32(size) >> 9 + case "dm.override_udev_sync_check": + devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) + if err != nil { + return nil, err + } default: return nil, fmt.Errorf("Unknown option %s\n", key) } diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go index 6cb7572384fb3..60006af5a5d1c 100644 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ b/daemon/graphdriver/devmapper/devmapper_test.go @@ -13,6 +13,7 @@ func init() { DefaultDataLoopbackSize = 300 * 1024 * 1024 DefaultMetaDataLoopbackSize = 200 * 1024 * 1024 DefaultBaseFsSize = 300 * 1024 * 1024 + DefaultUdevSyncOverride = true if err := graphtest.InitLoopbacks(); err != nil { panic(err) } diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go index 26095b05c4747..c57dd87136cb7 100644 --- a/daemon/graphdriver/driver.go +++ b/daemon/graphdriver/driver.go @@ -14,20 +14,22 @@ import ( type FsMagic uint32 const ( - FsMagicBtrfs = FsMagic(0x9123683E) FsMagicAufs = FsMagic(0x61756673) - FsMagicExtfs = FsMagic(0x0000EF53) + FsMagicBtrfs = FsMagic(0x9123683E) FsMagicCramfs = FsMagic(0x28cd3d45) - FsMagicRamFs = FsMagic(0x858458f6) - FsMagicTmpFs = FsMagic(0x01021994) - FsMagicSquashFs = FsMagic(0x73717368) + FsMagicExtfs = FsMagic(0x0000EF53) + FsMagicF2fs = FsMagic(0xF2F52010) + FsMagicJffs2Fs = FsMagic(0x000072b6) + FsMagicJfs = FsMagic(0x3153464a) FsMagicNfsFs = FsMagic(0x00006969) + FsMagicRamFs = FsMagic(0x858458f6) FsMagicReiserFs = FsMagic(0x52654973) FsMagicSmbFs = FsMagic(0x0000517B) - 
FsMagicJffs2Fs = FsMagic(0x000072b6) - FsMagicZfs = FsMagic(0x2fc12fc1) - FsMagicXfs = FsMagic(0x58465342) + FsMagicSquashFs = FsMagic(0x73717368) + FsMagicTmpFs = FsMagic(0x01021994) FsMagicUnsupported = FsMagic(0x00000000) + FsMagicXfs = FsMagic(0x58465342) + FsMagicZfs = FsMagic(0x2fc12fc1) ) var ( @@ -50,18 +52,20 @@ var ( FsNames = map[FsMagic]string{ FsMagicAufs: "aufs", FsMagicBtrfs: "btrfs", - FsMagicExtfs: "extfs", FsMagicCramfs: "cramfs", - FsMagicRamFs: "ramfs", - FsMagicTmpFs: "tmpfs", - FsMagicSquashFs: "squashfs", + FsMagicExtfs: "extfs", + FsMagicF2fs: "f2fs", + FsMagicJffs2Fs: "jffs2", + FsMagicJfs: "jfs", FsMagicNfsFs: "nfs", + FsMagicRamFs: "ramfs", FsMagicReiserFs: "reiserfs", FsMagicSmbFs: "smb", - FsMagicJffs2Fs: "jffs2", - FsMagicZfs: "zfs", - FsMagicXfs: "xfs", + FsMagicSquashFs: "squashfs", + FsMagicTmpFs: "tmpfs", FsMagicUnsupported: "unsupported", + FsMagicXfs: "xfs", + FsMagicZfs: "zfs", } ) @@ -142,10 +146,40 @@ func GetDriver(name, home string, options []string) (Driver, error) { func New(root string, options []string) (driver Driver, err error) { for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver return GetDriver(name, root, options) } } + // Guess for prior driver + priorDrivers := scanPriorDrivers(root) + for _, name := range priority { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + for _, prior := range priorDrivers { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + if prior == name { + driver, err = GetDriver(name, root, options) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+ logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) + return nil, err + } + if err := checkPriorDriver(name, root); err != nil { + return nil, err + } + logrus.Infof("[graphdriver] using prior storage driver %q", name) + return driver, nil + } + } + } + // Check for priority drivers first for _, name := range priority { driver, err = GetDriver(name, root, options) @@ -155,34 +189,47 @@ func New(root string, options []string) (driver Driver, err error) { } return nil, err } - checkPriorDriver(name, root) return driver, nil } // Check all registered drivers if no priority driver is found - for name, initFunc := range drivers { + for _, initFunc := range drivers { if driver, err = initFunc(root, options); err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } - checkPriorDriver(name, root) return driver, nil } return nil, fmt.Errorf("No supported storage backend found") } -func checkPriorDriver(name, root string) { +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) []string { priorDrivers := []string{} - for prior := range drivers { + for driver := range drivers { + p := path.Join(root, driver) + if _, err := os.Stat(p); err == nil { + priorDrivers = append(priorDrivers, driver) + } + } + return priorDrivers +} + +func checkPriorDriver(name, root string) error { + priorDrivers := []string{} + for _, prior := range scanPriorDrivers(root) { if prior != name && prior != "vfs" { if _, err := os.Stat(path.Join(root, prior)); err == nil { priorDrivers = append(priorDrivers, prior) } } } + if len(priorDrivers) > 0 { - logrus.Warnf("Graphdriver %s selected. 
Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ",")) + + return errors.New(fmt.Sprintf("%q contains other graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(priorDrivers, ","))) } + return nil } diff --git a/daemon/image_delete.go b/daemon/image_delete.go index a44eb1bfa6771..ece33a3c78526 100644 --- a/daemon/image_delete.go +++ b/daemon/image_delete.go @@ -30,6 +30,7 @@ func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, fi repoName, tag string tags = []string{} ) + repoAndTags := make(map[string][]string) // FIXME: please respect DRY and centralize repo+tag parsing in a single central place! -- shykes repoName, tag = parsers.ParseRepositoryTag(name) @@ -68,19 +69,25 @@ func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, fi if repoName == "" || repoName == parsedRepo { repoName = parsedRepo if parsedTag != "" { - tags = append(tags, parsedTag) + repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) } } else if repoName != parsedRepo && !force && first { // the id belongs to multiple repos, like base:latest and user:test, // in that case return conflict return fmt.Errorf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", name) + } else { + //the id belongs to multiple repos, with -f just delete all + repoName = parsedRepo + if parsedTag != "" { + repoAndTags[repoName] = append(repoAndTags[repoName], parsedTag) + } } } } else { - tags = append(tags, tag) + repoAndTags[repoName] = append(repoAndTags[repoName], tag) } - if !first && len(tags) > 0 { + if !first && len(repoAndTags) > 0 { return nil } @@ -91,16 +98,18 @@ func (daemon *Daemon) imgDeleteHelper(name string, list *[]types.ImageDelete, fi } // Untag the current image - for _, tag := range tags { - tagDeleted, err := daemon.Repositories().Delete(repoName, tag) - if err != nil { 
- return err - } - if tagDeleted { - *list = append(*list, types.ImageDelete{ - Untagged: utils.ImageReference(repoName, tag), - }) - daemon.EventsService.Log("untag", img.ID, "") + for repoName, tags := range repoAndTags { + for _, tag := range tags { + tagDeleted, err := daemon.Repositories().Delete(repoName, tag) + if err != nil { + return err + } + if tagDeleted { + *list = append(*list, types.ImageDelete{ + Untagged: utils.ImageReference(repoName, tag), + }) + daemon.EventsService.Log("untag", img.ID, "") + } } } tags = daemon.Repositories().ByID()[img.ID] diff --git a/daemon/info.go b/daemon/info.go index 183a9e68bb01b..df1c0530ccc0a 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -6,8 +6,9 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/engine" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/parsers/kernel" "github.com/docker/docker/pkg/parsers/operatingsystem" "github.com/docker/docker/pkg/system" @@ -15,7 +16,7 @@ import ( "github.com/docker/docker/utils" ) -func (daemon *Daemon) CmdInfo(job *engine.Job) error { +func (daemon *Daemon) SystemInfo() (*types.Info, error) { images, _ := daemon.Graph().Map() var imgcount int if images == nil { @@ -32,11 +33,15 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) error { if s, err := operatingsystem.GetOperatingSystem(); err == nil { operatingSystem = s } - if inContainer, err := operatingsystem.IsContainerized(); err != nil { - logrus.Errorf("Could not determine if daemon is containerized: %v", err) - operatingSystem += " (error determining if containerized)" - } else if inContainer { - operatingSystem += " (containerized)" + + // Don't do containerized check on Windows + if runtime.GOOS != "windows" { + if inContainer, err := operatingsystem.IsContainerized(); err != nil { + logrus.Errorf("Could not determine if daemon is containerized: %v", err) + operatingSystem += " 
(error determining if containerized)" + } else if inContainer { + operatingSystem += " (containerized)" + } } meminfo, err := system.ReadMemInfo() @@ -51,47 +56,47 @@ func (daemon *Daemon) CmdInfo(job *engine.Job) error { initPath = daemon.SystemInitPath() } - v := &engine.Env{} - v.SetJson("ID", daemon.ID) - v.SetInt("Containers", len(daemon.List())) - v.SetInt("Images", imgcount) - v.Set("Driver", daemon.GraphDriver().String()) - v.SetJson("DriverStatus", daemon.GraphDriver().Status()) - v.SetBool("MemoryLimit", daemon.SystemConfig().MemoryLimit) - v.SetBool("SwapLimit", daemon.SystemConfig().SwapLimit) - v.SetBool("IPv4Forwarding", !daemon.SystemConfig().IPv4ForwardingDisabled) - v.SetBool("Debug", os.Getenv("DEBUG") != "") - v.SetInt("NFd", utils.GetTotalUsedFds()) - v.SetInt("NGoroutines", runtime.NumGoroutine()) - v.Set("SystemTime", time.Now().Format(time.RFC3339Nano)) - v.Set("ExecutionDriver", daemon.ExecutionDriver().Name()) - v.Set("LoggingDriver", daemon.defaultLogConfig.Type) - v.SetInt("NEventsListener", daemon.EventsService.SubscribersCount()) - v.Set("KernelVersion", kernelVersion) - v.Set("OperatingSystem", operatingSystem) - v.Set("IndexServerAddress", registry.IndexServerAddress()) - v.SetJson("RegistryConfig", daemon.RegistryService.Config) - v.Set("InitSha1", dockerversion.INITSHA1) - v.Set("InitPath", initPath) - v.SetInt("NCPU", runtime.NumCPU()) - v.SetInt64("MemTotal", meminfo.MemTotal) - v.Set("DockerRootDir", daemon.Config().Root) + v := &types.Info{ + ID: daemon.ID, + Containers: len(daemon.List()), + Images: imgcount, + Driver: daemon.GraphDriver().String(), + DriverStatus: daemon.GraphDriver().Status(), + MemoryLimit: daemon.SystemConfig().MemoryLimit, + SwapLimit: daemon.SystemConfig().SwapLimit, + CpuCfsQuota: daemon.SystemConfig().CpuCfsQuota, + IPv4Forwarding: !daemon.SystemConfig().IPv4ForwardingDisabled, + Debug: os.Getenv("DEBUG") != "", + NFd: fileutils.GetTotalUsedFds(), + NGoroutines: runtime.NumGoroutine(), + SystemTime: 
time.Now().Format(time.RFC3339Nano), + ExecutionDriver: daemon.ExecutionDriver().Name(), + LoggingDriver: daemon.defaultLogConfig.Type, + NEventsListener: daemon.EventsService.SubscribersCount(), + KernelVersion: kernelVersion, + OperatingSystem: operatingSystem, + IndexServerAddress: registry.IndexServerAddress(), + RegistryConfig: daemon.RegistryService.Config, + InitSha1: dockerversion.INITSHA1, + InitPath: initPath, + NCPU: runtime.NumCPU(), + MemTotal: meminfo.MemTotal, + DockerRootDir: daemon.Config().Root, + Labels: daemon.Config().Labels, + } + if httpProxy := os.Getenv("http_proxy"); httpProxy != "" { - v.Set("HttpProxy", httpProxy) + v.HttpProxy = httpProxy } if httpsProxy := os.Getenv("https_proxy"); httpsProxy != "" { - v.Set("HttpsProxy", httpsProxy) + v.HttpsProxy = httpsProxy } if noProxy := os.Getenv("no_proxy"); noProxy != "" { - v.Set("NoProxy", noProxy) + v.NoProxy = noProxy } - if hostname, err := os.Hostname(); err == nil { - v.SetJson("Name", hostname) - } - v.SetList("Labels", daemon.Config().Labels) - if _, err := v.WriteTo(job.Stdout); err != nil { - return err + v.Name = hostname } - return nil + + return v, nil } diff --git a/daemon/inspect.go b/daemon/inspect.go index 73ce2ea8e250a..56db3d059b12c 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -1,99 +1,99 @@ package daemon import ( - "encoding/json" "fmt" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" "github.com/docker/docker/runconfig" ) -func (daemon *Daemon) ContainerInspect(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s NAME", job.Name) - } - name := job.Args[0] +type ContainerJSONRaw struct { + *Container + HostConfig *runconfig.HostConfig +} + +func (daemon *Daemon) ContainerInspectRaw(name string) (*ContainerJSONRaw, error) { container, err := daemon.Get(name) if err != nil { - return err + return nil, err } container.Lock() defer container.Unlock() - if job.GetenvBool("raw") { - b, err := 
json.Marshal(&struct { - *Container - HostConfig *runconfig.HostConfig - }{container, container.hostConfig}) - if err != nil { - return err - } - job.Stdout.Write(b) - return nil + + return &ContainerJSONRaw{container, container.hostConfig}, nil +} + +func (daemon *Daemon) ContainerInspect(name string) (*types.ContainerJSON, error) { + container, err := daemon.Get(name) + if err != nil { + return nil, err } - out := &engine.Env{} - out.SetJson("Id", container.ID) - out.SetAuto("Created", container.Created) - out.SetJson("Path", container.Path) - out.SetList("Args", container.Args) - out.SetJson("Config", container.Config) - out.SetJson("State", container.State) - out.Set("Image", container.ImageID) - out.SetJson("NetworkSettings", container.NetworkSettings) - out.Set("ResolvConfPath", container.ResolvConfPath) - out.Set("HostnamePath", container.HostnamePath) - out.Set("HostsPath", container.HostsPath) - out.Set("LogPath", container.LogPath) - out.SetJson("Name", container.Name) - out.SetInt("RestartCount", container.RestartCount) - out.Set("Driver", container.Driver) - out.Set("ExecDriver", container.ExecDriver) - out.Set("MountLabel", container.MountLabel) - out.Set("ProcessLabel", container.ProcessLabel) - out.SetJson("Volumes", container.Volumes) - out.SetJson("VolumesRW", container.VolumesRW) - out.SetJson("AppArmorProfile", container.AppArmorProfile) + container.Lock() + defer container.Unlock() - out.SetList("ExecIDs", container.GetExecIDs()) + // make a copy to play with + hostConfig := *container.hostConfig if children, err := daemon.Children(container.Name); err == nil { for linkAlias, child := range children { - container.hostConfig.Links = append(container.hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) + hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) } } // we need this trick to preserve empty log driver, so // container will use daemon defaults even if daemon change them - if 
container.hostConfig.LogConfig.Type == "" { - container.hostConfig.LogConfig = daemon.defaultLogConfig - defer func() { - container.hostConfig.LogConfig = runconfig.LogConfig{} - }() + if hostConfig.LogConfig.Type == "" { + hostConfig.LogConfig = daemon.defaultLogConfig } - out.SetJson("HostConfig", container.hostConfig) + containerState := &types.ContainerState{ + Running: container.State.Running, + Paused: container.State.Paused, + Restarting: container.State.Restarting, + OOMKilled: container.State.OOMKilled, + Dead: container.State.Dead, + Pid: container.State.Pid, + ExitCode: container.State.ExitCode, + Error: container.State.Error, + StartedAt: container.State.StartedAt, + FinishedAt: container.State.FinishedAt, + } - container.hostConfig.Links = nil - if _, err := out.WriteTo(job.Stdout); err != nil { - return err + contJSON := &types.ContainerJSON{ + Id: container.ID, + Created: container.Created, + Path: container.Path, + Args: container.Args, + Config: container.Config, + State: containerState, + Image: container.ImageID, + NetworkSettings: container.NetworkSettings, + ResolvConfPath: container.ResolvConfPath, + HostnamePath: container.HostnamePath, + HostsPath: container.HostsPath, + LogPath: container.LogPath, + Name: container.Name, + RestartCount: container.RestartCount, + Driver: container.Driver, + ExecDriver: container.ExecDriver, + MountLabel: container.MountLabel, + ProcessLabel: container.ProcessLabel, + Volumes: container.Volumes, + VolumesRW: container.VolumesRW, + AppArmorProfile: container.AppArmorProfile, + ExecIDs: container.GetExecIDs(), + HostConfig: &hostConfig, } - return nil + + return contJSON, nil } -func (daemon *Daemon) ContainerExecInspect(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s ID", job.Name) - } - id := job.Args[0] +func (daemon *Daemon) ContainerExecInspect(id string) (*execConfig, error) { eConfig, err := daemon.getExecConfig(id) if err != nil { - return err + return nil, err } - b, 
err := json.Marshal(*eConfig) - if err != nil { - return err - } - job.Stdout.Write(b) - return nil + return eConfig, nil } diff --git a/daemon/kill.go b/daemon/kill.go index 56bcad900e70a..5d828f16b2321 100644 --- a/daemon/kill.go +++ b/daemon/kill.go @@ -2,43 +2,14 @@ package daemon import ( "fmt" - "strconv" - "strings" "syscall" - - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/signal" ) // ContainerKill send signal to the container // If no signal is given (sig 0), then Kill with SIGKILL and wait // for the container to exit. // If a signal is given, then just send it to the container and return. -func (daemon *Daemon) ContainerKill(job *engine.Job) error { - if n := len(job.Args); n < 1 || n > 2 { - return fmt.Errorf("Usage: %s CONTAINER [SIGNAL]", job.Name) - } - var ( - name = job.Args[0] - sig uint64 - err error - ) - - // If we have a signal, look at it. Otherwise, do nothing - if len(job.Args) == 2 && job.Args[1] != "" { - // Check if we passed the signal as a number: - // The largest legal signal is 31, so let's parse on 5 bits - sig, err = strconv.ParseUint(job.Args[1], 10, 5) - if err != nil { - // The signal is not a number, treat it as a string (either like "KILL" or like "SIGKILL") - sig = uint64(signal.SignalMap[strings.TrimPrefix(job.Args[1], "SIG")]) - } - - if sig == 0 { - return fmt.Errorf("Invalid signal: %s", job.Args[1]) - } - } - +func (daemon *Daemon) ContainerKill(name string, sig uint64) error { container, err := daemon.Get(name) if err != nil { return err @@ -49,13 +20,12 @@ func (daemon *Daemon) ContainerKill(job *engine.Job) error { if err := container.Kill(); err != nil { return fmt.Errorf("Cannot kill container %s: %s", name, err) } - container.LogEvent("kill") } else { // Otherwise, just send the requested signal if err := container.KillSig(int(sig)); err != nil { return fmt.Errorf("Cannot kill container %s: %s", name, err) } - // FIXME: Add event for signals } + container.LogEvent("kill") return nil } diff 
--git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go new file mode 100644 index 0000000000000..5eb141ac83f70 --- /dev/null +++ b/daemon/logger/journald/journald.go @@ -0,0 +1,35 @@ +package journald + +import ( + "fmt" + + "github.com/coreos/go-systemd/journal" + "github.com/docker/docker/daemon/logger" +) + +type Journald struct { + Jmap map[string]string +} + +func New(id string) (logger.Logger, error) { + if !journal.Enabled() { + return nil, fmt.Errorf("journald is not enabled on this host") + } + jmap := map[string]string{"MESSAGE_ID": id} + return &Journald{Jmap: jmap}, nil +} + +func (s *Journald) Log(msg *logger.Message) error { + if msg.Source == "stderr" { + return journal.Send(string(msg.Line), journal.PriErr, s.Jmap) + } + return journal.Send(string(msg.Line), journal.PriInfo, s.Jmap) +} + +func (s *Journald) Close() error { + return nil +} + +func (s *Journald) Name() string { + return "Journald" +} diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go index afd3dacbb481f..a250d6e93cb77 100644 --- a/daemon/logger/syslog/syslog.go +++ b/daemon/logger/syslog/syslog.go @@ -11,26 +11,23 @@ import ( type Syslog struct { writer *syslog.Writer - tag string } func New(tag string) (logger.Logger, error) { - log, err := syslog.New(syslog.LOG_USER, path.Base(os.Args[0])) + log, err := syslog.New(syslog.LOG_DAEMON, fmt.Sprintf("%s/%s", path.Base(os.Args[0]), tag)) if err != nil { return nil, err } return &Syslog{ writer: log, - tag: tag, }, nil } func (s *Syslog) Log(msg *logger.Message) error { - logMessage := fmt.Sprintf("%s: %s", s.tag, msg.Line) if msg.Source == "stderr" { - return s.writer.Err(logMessage) + return s.writer.Err(string(msg.Line)) } - return s.writer.Info(logMessage) + return s.writer.Info(string(msg.Line)) } func (s *Syslog) Close() error { diff --git a/daemon/logs.go b/daemon/logs.go index c991fa1978a46..79d4044bbe20b 100644 --- a/daemon/logs.go +++ b/daemon/logs.go @@ -10,40 +10,50 @@ import ( 
"sync" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/jsonlog" + "github.com/docker/docker/pkg/stdcopy" "github.com/docker/docker/pkg/tailfile" "github.com/docker/docker/pkg/timeutils" ) -func (daemon *Daemon) ContainerLogs(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s CONTAINER\n", job.Name) - } +type ContainerLogsConfig struct { + Follow, Timestamps bool + Tail string + UseStdout, UseStderr bool + OutStream io.Writer +} +func (daemon *Daemon) ContainerLogs(name string, config *ContainerLogsConfig) error { var ( - name = job.Args[0] - stdout = job.GetenvBool("stdout") - stderr = job.GetenvBool("stderr") - tail = job.Getenv("tail") - follow = job.GetenvBool("follow") - times = job.GetenvBool("timestamps") lines = -1 format string ) - if !(stdout || stderr) { + if !(config.UseStdout || config.UseStderr) { return fmt.Errorf("You must choose at least one stream") } - if times { + if config.Timestamps { format = timeutils.RFC3339NanoFixed } - if tail == "" { - tail = "all" + if config.Tail == "" { + config.Tail = "all" } + container, err := daemon.Get(name) if err != nil { return err } + + var ( + outStream = config.OutStream + errStream io.Writer + ) + if !container.Config.Tty { + errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) + outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) + } else { + errStream = outStream + } + if container.LogDriverType() != "json-file" { return fmt.Errorf("\"logs\" endpoint is supported only for \"json-file\" logging driver") } @@ -51,30 +61,30 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) error { if err != nil && os.IsNotExist(err) { // Legacy logs logrus.Debugf("Old logs format") - if stdout { + if config.UseStdout { cLog, err := container.ReadLog("stdout") if err != nil { logrus.Errorf("Error reading logs (stdout): %s", err) - } else if _, err := io.Copy(job.Stdout, cLog); err != nil { + } else if _, err := 
io.Copy(outStream, cLog); err != nil { logrus.Errorf("Error streaming logs (stdout): %s", err) } } - if stderr { + if config.UseStderr { cLog, err := container.ReadLog("stderr") if err != nil { logrus.Errorf("Error reading logs (stderr): %s", err) - } else if _, err := io.Copy(job.Stderr, cLog); err != nil { + } else if _, err := io.Copy(errStream, cLog); err != nil { logrus.Errorf("Error streaming logs (stderr): %s", err) } } } else if err != nil { logrus.Errorf("Error reading logs (json): %s", err) } else { - if tail != "all" { + if config.Tail != "all" { var err error - lines, err = strconv.Atoi(tail) + lines, err = strconv.Atoi(config.Tail) if err != nil { - logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", tail, err) + logrus.Errorf("Failed to parse tail %s, error: %v, show all logs", config.Tail, err) lines = -1 } } @@ -101,39 +111,39 @@ func (daemon *Daemon) ContainerLogs(job *engine.Job) error { break } logLine := l.Log - if times { + if config.Timestamps { // format can be "" or time format, so here can't be error logLine, _ = l.Format(format) } - if l.Stream == "stdout" && stdout { - io.WriteString(job.Stdout, logLine) + if l.Stream == "stdout" && config.UseStdout { + io.WriteString(outStream, logLine) } - if l.Stream == "stderr" && stderr { - io.WriteString(job.Stderr, logLine) + if l.Stream == "stderr" && config.UseStderr { + io.WriteString(errStream, logLine) } l.Reset() } } } - if follow && container.IsRunning() { + if config.Follow && container.IsRunning() { errors := make(chan error, 2) wg := sync.WaitGroup{} - if stdout { + if config.UseStdout { wg.Add(1) stdoutPipe := container.StdoutLogPipe() defer stdoutPipe.Close() go func() { - errors <- jsonlog.WriteLog(stdoutPipe, job.Stdout, format) + errors <- jsonlog.WriteLog(stdoutPipe, outStream, format) wg.Done() }() } - if stderr { + if config.UseStderr { wg.Add(1) stderrPipe := container.StderrLogPipe() defer stderrPipe.Close() go func() { - errors <- jsonlog.WriteLog(stderrPipe, 
job.Stderr, format) + errors <- jsonlog.WriteLog(stderrPipe, errStream, format) wg.Done() }() } diff --git a/daemon/monitor.go b/daemon/monitor.go index 293849dd364b4..7474d68c158fd 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -10,6 +10,7 @@ import ( "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/runconfig" + "github.com/docker/libcontainer" ) const defaultTimeIncrement = 100 @@ -44,6 +45,9 @@ type containerMonitor struct { // left waiting for nothing to happen during this time stopChan chan struct{} + // like startSignal but for restoring a container + restoreSignal chan struct{} + // timeIncrement is the amount of time to wait between restarts // this is in milliseconds timeIncrement int @@ -61,6 +65,7 @@ func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) * timeIncrement: defaultTimeIncrement, stopChan: make(chan struct{}), startSignal: make(chan struct{}), + restoreSignal: make(chan struct{}), } } @@ -181,6 +186,51 @@ func (m *containerMonitor) Start() error { } } +// Like Start() but for restoring a container. 
+func (m *containerMonitor) Restore(opts *libcontainer.CriuOpts) error { + var ( + err error + // XXX The following line should be changed to + // exitStatus execdriver.ExitStatus to match Start() + exitCode execdriver.ExitStatus + afterRestore bool + ) + defer func() { + if afterRestore { + m.container.Lock() + m.container.setStopped(&execdriver.ExitStatus{exitCode.ExitCode, false}) + defer m.container.Unlock() + } + m.Close() + }() + + // FIXME: right now if we startLogging again we get double logs after a restore + if m.container.logCopier == nil { + if err := m.container.startLogging(); err != nil { + m.resetContainer(false) + return err + } + } + + pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin) + + m.container.LogEvent("restore") + m.lastStartTime = time.Now() + if exitCode, err = m.container.daemon.Restore(m.container, pipes, m.restoreCallback, opts); err != nil { + logrus.Errorf("Error restoring container: %s, exitCode=%d", err, exitCode) + m.container.ExitCode = -1 + m.resetContainer(false) + return err + } + afterRestore = true + + m.container.ExitCode = exitCode.ExitCode + m.resetMonitor(err == nil && exitCode.ExitCode == 0) + m.container.LogEvent("die") + m.resetContainer(true) + return err +} + // resetMonitor resets the stateful fields on the containerMonitor based on the // previous runs success or failure. Regardless of success, if the container had // an execution time of more than 10s then reset the timer back to the default @@ -267,6 +317,29 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid } } +// Like callback() but for restoring a container. +func (m *containerMonitor) restoreCallback(processConfig *execdriver.ProcessConfig, restorePid int) { + // If restorePid is 0, it means that restore failed. + if restorePid != 0 { + m.container.setRunning(restorePid) + } + + // Unblock the goroutine waiting in waitForRestore(). 
+ select { + case <-m.restoreSignal: + default: + close(m.restoreSignal) + } + + if restorePid != 0 { + // Write config.json and hostconfig.json files + // to /var/lib/docker/containers/. + if err := m.container.ToDisk(); err != nil { + logrus.Debugf("%s", err) + } + } +} + // resetContainer resets the container's IO and ensures that the command is able to be executed again // by copying the data into a new struct // if lock is true, then container locked during reset diff --git a/daemon/networkdriver/bridge/driver.go b/daemon/networkdriver/bridge/driver.go index dabb1165e76bd..6dca828524122 100644 --- a/daemon/networkdriver/bridge/driver.go +++ b/daemon/networkdriver/bridge/driver.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net" "os" + "os/exec" "strconv" "strings" "sync" @@ -76,8 +77,10 @@ var ( bridgeIface string bridgeIPv4Network *net.IPNet + gatewayIPv4 net.IP bridgeIPv6Addr net.IP globalIPv6Network *net.IPNet + gatewayIPv6 net.IP portMapper *portmapper.PortMapper once sync.Once @@ -102,6 +105,8 @@ type Config struct { IP string FixedCIDR string FixedCIDRv6 string + DefaultGatewayIPv4 string + DefaultGatewayIPv6 string InterContainerCommunication bool } @@ -113,6 +118,13 @@ func InitDriver(config *Config) error { addrsv6 []net.Addr bridgeIPv6 = "fe80::1/64" ) + + // try to modprobe bridge first + // see gh#12177 + if out, err := exec.Command("modprobe", "-va", "bridge", "nf_nat").Output(); err != nil { + logrus.Warnf("Running modprobe bridge nf_nat failed with message: %s, error: %v", out, err) + } + initPortMapper() if config.DefaultIp != nil { @@ -135,8 +147,11 @@ func InitDriver(config *Config) error { return err } + logrus.Info("Bridge interface not found, trying to create it") + // If the iface is not found, try to create it if err := configureBridge(config.IP, bridgeIPv6, config.EnableIPv6); err != nil { + logrus.Errorf("Could not configure Bridge: %s", err) return err } @@ -211,12 +226,18 @@ func InitDriver(config *Config) error { bridgeIPv6Addr = 
networkv6.IP } + if config.EnableIptables { + iptables.FirewalldInit() + } + // Configure iptables for link support if config.EnableIptables { if err := setupIPTables(addrv4, config.InterContainerCommunication, config.EnableIpMasq); err != nil { + logrus.Errorf("Error configuring iptables: %s", err) return err } - + // call this on Firewalld reload + iptables.OnReloaded(func() { setupIPTables(addrv4, config.InterContainerCommunication, config.EnableIpMasq) }) } if config.EnableIpForward { @@ -246,10 +267,16 @@ func InitDriver(config *Config) error { if err != nil { return err } + // call this on Firewalld reload + iptables.OnReloaded(func() { iptables.NewChain("DOCKER", bridgeIface, iptables.Nat) }) + chain, err := iptables.NewChain("DOCKER", bridgeIface, iptables.Filter) if err != nil { return err } + // call this on Firewalld reload + iptables.OnReloaded(func() { iptables.NewChain("DOCKER", bridgeIface, iptables.Filter) }) + portMapper.SetIptablesChain(chain) } @@ -261,10 +288,17 @@ func InitDriver(config *Config) error { } logrus.Debugf("Subnet: %v", subnet) if err := ipAllocator.RegisterSubnet(bridgeIPv4Network, subnet); err != nil { + logrus.Errorf("Error registering subnet for IPv4 bridge network: %s", err) return err } } + if gateway, err := requestDefaultGateway(config.DefaultGatewayIPv4, bridgeIPv4Network); err != nil { + return err + } else { + gatewayIPv4 = gateway + } + if config.FixedCIDRv6 != "" { _, subnet, err := net.ParseCIDR(config.FixedCIDRv6) if err != nil { @@ -272,13 +306,24 @@ func InitDriver(config *Config) error { } logrus.Debugf("Subnet: %v", subnet) if err := ipAllocator.RegisterSubnet(subnet, subnet); err != nil { + logrus.Errorf("Error registering subnet for IPv6 bridge network: %s", err) return err } globalIPv6Network = subnet + + if gateway, err := requestDefaultGateway(config.DefaultGatewayIPv6, globalIPv6Network); err != nil { + return err + } else { + gatewayIPv6 = gateway + } } // Block BridgeIP in IP allocator - 
ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP) + ipAllocator.RequestIP(bridgeIPv4Network, bridgeIPv4Network.IP, false) + + if config.EnableIptables { + iptables.OnReloaded(portMapper.ReMapAll) // call this on Firewalld reload + } return nil } @@ -294,7 +339,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { "-t", string(iptables.Nat), "-I", "POSTROUTING"}, natArgs...)...); err != nil { return fmt.Errorf("Unable to enable network bridge NAT: %s", err) } else if len(output) != 0 { - return &iptables.ChainError{Chain: "POSTROUTING", Output: output} + return iptables.ChainError{Chain: "POSTROUTING", Output: output} } } } @@ -310,7 +355,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if !iptables.Exists(iptables.Filter, "FORWARD", dropArgs...) { logrus.Debugf("Disable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, dropArgs...)...); err != nil { + if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, dropArgs...)...); err != nil { return fmt.Errorf("Unable to prevent intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error disabling intercontainer communication: %s", output) @@ -321,7 +366,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if !iptables.Exists(iptables.Filter, "FORWARD", acceptArgs...) 
{ logrus.Debugf("Enable inter-container communication") - if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, acceptArgs...)...); err != nil { + if output, err := iptables.Raw(append([]string{"-A", "FORWARD"}, acceptArgs...)...); err != nil { return fmt.Errorf("Unable to allow intercontainer communication: %s", err) } else if len(output) != 0 { return fmt.Errorf("Error enabling intercontainer communication: %s", output) @@ -335,7 +380,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, outgoingArgs...)...); err != nil { return fmt.Errorf("Unable to allow outgoing packets: %s", err) } else if len(output) != 0 { - return &iptables.ChainError{Chain: "FORWARD outgoing", Output: output} + return iptables.ChainError{Chain: "FORWARD outgoing", Output: output} } } @@ -346,7 +391,7 @@ func setupIPTables(addr net.Addr, icc, ipmasq bool) error { if output, err := iptables.Raw(append([]string{"-I", "FORWARD"}, existingArgs...)...); err != nil { return fmt.Errorf("Unable to allow incoming packets: %s", err) } else if len(output) != 0 { - return &iptables.ChainError{Chain: "FORWARD incoming", Output: output} + return iptables.ChainError{Chain: "FORWARD incoming", Output: output} } } return nil @@ -459,6 +504,24 @@ func setupIPv6Bridge(bridgeIPv6 string) error { return nil } +func requestDefaultGateway(requestedGateway string, network *net.IPNet) (gateway net.IP, err error) { + if requestedGateway != "" { + gateway = net.ParseIP(requestedGateway) + + if gateway == nil { + return nil, fmt.Errorf("Bad parameter: invalid gateway ip %s", requestedGateway) + } + + if !network.Contains(gateway) { + return nil, fmt.Errorf("Gateway ip %s must be part of the network %s", requestedGateway, network.String()) + } + + ipAllocator.RequestIP(network, gateway, false) + } + + return gateway, nil +} + func createBridgeIface(name string) error { kv, err := kernel.GetKernelVersion() // Only set the bridge's 
mac address if the kernel version is > 3.3 @@ -505,16 +568,34 @@ func linkLocalIPv6FromMac(mac string) (string, error) { return fmt.Sprintf("fe80::%x%x:%xff:fe%x:%x%x/64", hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]), nil } +// This function is called from restore (in daemon/daemon.go) +// to reserve the IP address of a checkpointed container when +// the daemon starts. +func ReserveIP(id, ipAddr string) error { + logrus.Debugf("reserving IP %s at %v", ipAddr, bridgeIPv4Network) + ip, err := ipAllocator.RequestIP(bridgeIPv4Network, net.ParseIP(ipAddr), false) + if err != nil { + return err + } + currentInterfaces.Set(id, &networkInterface{ + IP: ip, + }) + return nil +} + // Allocate a network interface -func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Settings, error) { +func Allocate(id, requestedMac, requestedIP, requestedIPv6 string, restoring bool) (*network.Settings, error) { var ( - ip net.IP - mac net.HardwareAddr - err error - globalIPv6 net.IP + ip net.IP + mac net.HardwareAddr + err error + globalIPv6 net.IP + defaultGWIPv4 net.IP + defaultGWIPv6 net.IP ) - ip, err = ipAllocator.RequestIP(bridgeIPv4Network, net.ParseIP(requestedIP)) + ip, err = ipAllocator.RequestIP(bridgeIPv4Network, net.ParseIP(requestedIP), restoring) + if err != nil { return nil, err } @@ -522,6 +603,9 @@ func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Set // If no explicit mac address was given, generate a random one. 
if mac, err = net.ParseMAC(requestedMac); err != nil { mac = generateMacAddr(ip) + logrus.Debugf("using generated MAC address: %v", mac) + } else { + logrus.Debugf("using requested MAC address: %v", mac) } if globalIPv6Network != nil { @@ -536,7 +620,7 @@ func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Set } } - globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, ipv6) + globalIPv6, err = ipAllocator.RequestIP(globalIPv6Network, ipv6, restoring) if err != nil { logrus.Errorf("Allocator: RequestIP v6: %v", err) return nil, err @@ -546,6 +630,18 @@ func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Set maskSize, _ := bridgeIPv4Network.Mask.Size() + if gatewayIPv4 != nil { + defaultGWIPv4 = gatewayIPv4 + } else { + defaultGWIPv4 = bridgeIPv4Network.IP + } + + if gatewayIPv6 != nil { + defaultGWIPv6 = gatewayIPv6 + } else { + defaultGWIPv6 = bridgeIPv6Addr + } + // If linklocal IPv6 localIPv6Net, err := linkLocalIPv6FromMac(mac.String()) if err != nil { @@ -555,7 +651,7 @@ func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Set networkSettings := &network.Settings{ IPAddress: ip.String(), - Gateway: bridgeIPv4Network.IP.String(), + Gateway: defaultGWIPv4.String(), MacAddress: mac.String(), Bridge: bridgeIface, IPPrefixLen: maskSize, @@ -566,7 +662,7 @@ func Allocate(id, requestedMac, requestedIP, requestedIPv6 string) (*network.Set networkSettings.GlobalIPv6Address = globalIPv6.String() maskV6Size, _ := globalIPv6Network.Mask.Size() networkSettings.GlobalIPv6PrefixLen = maskV6Size - networkSettings.IPv6Gateway = bridgeIPv6Addr.String() + networkSettings.IPv6Gateway = defaultGWIPv6.String() } currentInterfaces.Set(id, &networkInterface{ @@ -583,6 +679,7 @@ func Release(id string) { if containerInterface == nil { logrus.Warnf("No network information to release for %s", id) + return } for _, nat := range containerInterface.PortMappings { @@ -602,7 +699,7 @@ func Release(id string) { } 
// Allocate an external port and map it to the interface -func AllocatePort(id string, port nat.Port, binding nat.PortBinding) (nat.PortBinding, error) { +func AllocatePort(id string, port nat.Port, binding nat.PortBinding, restoring bool) (nat.PortBinding, error) { var ( ip = defaultBindingIP proto = port.Proto() @@ -657,7 +754,18 @@ func AllocatePort(id string, port nat.Port, binding nat.PortBinding) (nat.PortBi } if err != nil { - return nat.PortBinding{}, err + // If we're restoring on the same Docker server, we + // should not error because we didn't release the port. + // + // XXX How do we handle this on a different server? + // XXX How do we make sure that the requestor is the + // right previous owner? + if restoring { + logrus.Warnf(">>> Ignoring error %s for restore", err) + err = nil + } else { + return nat.PortBinding{}, err + } } network.PortMappings = append(network.PortMappings, host) diff --git a/daemon/networkdriver/ipallocator/allocator.go b/daemon/networkdriver/ipallocator/allocator.go index 554dbdd5b1877..7ecaa3b221a98 100644 --- a/daemon/networkdriver/ipallocator/allocator.go +++ b/daemon/networkdriver/ipallocator/allocator.go @@ -81,7 +81,7 @@ func (a *IPAllocator) RegisterSubnet(network *net.IPNet, subnet *net.IPNet) erro // will return the next available ip if the ip provided is nil. 
If the // ip provided is not nil it will validate that the provided ip is available // for use or return an error -func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) { +func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP, restoring bool) (net.IP, error) { a.mutex.Lock() defer a.mutex.Unlock() @@ -95,7 +95,7 @@ func (a *IPAllocator) RequestIP(network *net.IPNet, ip net.IP) (net.IP, error) { if ip == nil { return allocated.getNextIP() } - return allocated.checkIP(ip) + return allocated.checkIP(ip, restoring) } // ReleaseIP adds the provided ip back into the pool of @@ -110,8 +110,20 @@ func (a *IPAllocator) ReleaseIP(network *net.IPNet, ip net.IP) error { return nil } -func (allocated *allocatedMap) checkIP(ip net.IP) (net.IP, error) { +func (allocated *allocatedMap) checkIP(ip net.IP, restoring bool) (net.IP, error) { if _, ok := allocated.p[ip.String()]; ok { + // If we're restoring on the same Docker server, we + // should not error on "ip already allocated" because + // we didn't release it. Also, if the server was restarted, + // it reserved this IP address when coming up. + // + // XXX How do we handle this on a different server? + // XXX How do we make sure that the requestor is the + // right previous owner? 
+ if restoring { + logrus.Warnf("using already allocated ip %v", ip) + return ip, nil + } return nil, ErrIPAlreadyAllocated } diff --git a/daemon/networkdriver/ipallocator/allocator_test.go b/daemon/networkdriver/ipallocator/allocator_test.go index fffe6e3389c6f..6c5c0e4dbcc4e 100644 --- a/daemon/networkdriver/ipallocator/allocator_test.go +++ b/daemon/networkdriver/ipallocator/allocator_test.go @@ -601,7 +601,7 @@ func TestRegisterBadTwice(t *testing.T) { Mask: []byte{255, 255, 255, 248}, } if err := a.RegisterSubnet(network, subnet); err != ErrNetworkAlreadyRegistered { - t.Fatalf("Expecteded ErrNetworkAlreadyRegistered error, got %v", err) + t.Fatalf("Expected ErrNetworkAlreadyRegistered error, got %v", err) } } diff --git a/daemon/networkdriver/portmapper/mapper.go b/daemon/networkdriver/portmapper/mapper.go index 8f79bae3f2deb..09952ba35b30b 100644 --- a/daemon/networkdriver/portmapper/mapper.go +++ b/daemon/networkdriver/portmapper/mapper.go @@ -132,6 +132,18 @@ func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int) (host return m.host, nil } +// re-apply all port mappings +func (pm *PortMapper) ReMapAll() { + logrus.Debugln("Re-applying all port mappings.") + for _, data := range pm.currentMappings { + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := pm.forward(iptables.Append, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + logrus.Errorf("Error on iptables add: %s", err) + } + } +} + func (pm *PortMapper) Unmap(host net.Addr) error { pm.lock.Lock() defer pm.lock.Unlock() diff --git a/daemon/networkdriver/portmapper/proxy.go b/daemon/networkdriver/portmapper/proxy.go index 5d0aa0be0d1c1..80b0027c703fd 100644 --- a/daemon/networkdriver/portmapper/proxy.go +++ b/daemon/networkdriver/portmapper/proxy.go @@ -84,7 +84,7 @@ func handleStopSignals(p proxy.Proxy) { s := make(chan os.Signal, 10) signal.Notify(s, os.Interrupt, syscall.SIGTERM, 
syscall.SIGSTOP) - for _ = range s { + for range s { p.Close() os.Exit(0) diff --git a/daemon/resize.go b/daemon/resize.go index fce06753e0f89..060634b13bfc6 100644 --- a/daemon/resize.go +++ b/daemon/resize.go @@ -1,48 +1,6 @@ package daemon -import ( - "fmt" - "strconv" - - "github.com/docker/docker/engine" -) - -func (daemon *Daemon) ContainerResize(job *engine.Job) error { - if len(job.Args) != 3 { - return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER HEIGHT WIDTH\n", job.Name) - } - name := job.Args[0] - height, err := strconv.Atoi(job.Args[1]) - if err != nil { - return err - } - width, err := strconv.Atoi(job.Args[2]) - if err != nil { - return err - } - container, err := daemon.Get(name) - if err != nil { - return err - } - if err := container.Resize(height, width); err != nil { - return err - } - return nil -} - -func (daemon *Daemon) ContainerExecResize(job *engine.Job) error { - if len(job.Args) != 3 { - return fmt.Errorf("Not enough arguments. Usage: %s EXEC HEIGHT WIDTH\n", job.Name) - } - name := job.Args[0] - height, err := strconv.Atoi(job.Args[1]) - if err != nil { - return err - } - width, err := strconv.Atoi(job.Args[2]) - if err != nil { - return err - } +func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { execConfig, err := daemon.getExecConfig(name) if err != nil { return err diff --git a/daemon/restart.go b/daemon/restart.go index 1bd2f8ca10dcd..86cc97d7e7354 100644 --- a/daemon/restart.go +++ b/daemon/restart.go @@ -1,27 +1,13 @@ package daemon -import ( - "fmt" +import "fmt" - "github.com/docker/docker/engine" -) - -func (daemon *Daemon) ContainerRestart(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } +func (daemon *Daemon) ContainerRestart(name string, seconds int) error { container, err := daemon.Get(name) if err != nil { return err } - if err 
:= container.Restart(int(t)); err != nil { + if err := container.Restart(seconds); err != nil { return fmt.Errorf("Cannot restart container %s: %s\n", name, err) } container.LogEvent("restart") diff --git a/daemon/start.go b/daemon/start.go index 8de67b99670bf..09b8b2881a41a 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -3,18 +3,10 @@ package daemon import ( "fmt" - "github.com/docker/docker/engine" "github.com/docker/docker/runconfig" ) -func (daemon *Daemon) ContainerStart(job *engine.Job) error { - if len(job.Args) < 1 { - return fmt.Errorf("Usage: %s container_id", job.Name) - } - var ( - name = job.Args[0] - ) - +func (daemon *Daemon) ContainerStart(name string, hostConfig *runconfig.HostConfig) error { container, err := daemon.Get(name) if err != nil { return err @@ -28,15 +20,18 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) error { return fmt.Errorf("Container already started") } - // If no environment was set, then no hostconfig was passed. + if _, err = daemon.verifyHostConfig(hostConfig); err != nil { + return err + } + // This is kept for backward compatibility - hostconfig should be passed when // creating a container, not during start. 
- if len(job.Environ()) > 0 { - hostConfig := runconfig.ContainerHostConfigFromJob(job) + if hostConfig != nil { if err := daemon.setHostConfig(container, hostConfig); err != nil { return err } } + if err := container.Start(); err != nil { container.LogEvent("die") return fmt.Errorf("Cannot start container %s: %s", name, err) @@ -44,21 +39,3 @@ func (daemon *Daemon) ContainerStart(job *engine.Job) error { return nil } - -func (daemon *Daemon) setHostConfig(container *Container, hostConfig *runconfig.HostConfig) error { - container.Lock() - defer container.Unlock() - if err := parseSecurityOpt(container, hostConfig); err != nil { - return err - } - - // Register any links from the host config before starting the container - if err := daemon.RegisterLinks(container, hostConfig); err != nil { - return err - } - - container.hostConfig = hostConfig - container.toDisk() - - return nil -} diff --git a/daemon/state.go b/daemon/state.go index 6387e6fc535c9..d1f515678710f 100644 --- a/daemon/state.go +++ b/daemon/state.go @@ -13,6 +13,7 @@ type State struct { sync.Mutex Running bool Paused bool + Checkpointed bool Restarting bool OOMKilled bool removalInProgress bool // Not need for this to be persistent on disk. 
@@ -22,6 +23,7 @@ type State struct { Error string // contains last known error when starting the container StartedAt time.Time FinishedAt time.Time + CheckpointedAt time.Time waitChan chan struct{} } @@ -48,6 +50,10 @@ func (s *State) String() string { return "Removal In Progress" } + if s.Checkpointed { + return fmt.Sprintf("Checkpointed %s ago", units.HumanDuration(time.Now().UTC().Sub(s.CheckpointedAt))) + } + if s.Dead { return "Dead" } @@ -71,6 +77,10 @@ func (s *State) StateString() string { return "running" } + if s.Checkpointed { + return "checkpointed" + } + if s.Dead { return "dead" } @@ -159,6 +169,7 @@ func (s *State) setRunning(pid int) { s.Running = true s.Paused = false s.Restarting = false + s.Checkpointed = false s.ExitCode = 0 s.Pid = pid s.StartedAt = time.Now().UTC() @@ -183,7 +194,7 @@ func (s *State) setStopped(exitStatus *execdriver.ExitStatus) { s.waitChan = make(chan struct{}) } -// SetRestarting is when docker hanldes the auto restart of containers when they are +// SetRestarting is when docker handles the auto restart of containers when they are // in the middle of a stop and being restarted again func (s *State) SetRestarting(exitStatus *execdriver.ExitStatus) { s.Lock() @@ -233,6 +244,27 @@ func (s *State) IsPaused() bool { return res } +func (s *State) SetCheckpointed(leaveRunning bool) { + s.Lock() + s.CheckpointedAt = time.Now().UTC() + s.Checkpointed = !leaveRunning + s.Running = leaveRunning + s.Paused = false + s.Restarting = false + // XXX Not sure if we need to close and recreate waitChan.
+ // close(s.waitChan) + // s.waitChan = make(chan struct{}) + s.Unlock() +} + +func (s *State) HasBeenCheckpointed() bool { + return s.CheckpointedAt != time.Time{} +} + +func (s *State) IsCheckpointed() bool { + return s.Checkpointed +} + func (s *State) SetRemovalInProgress() error { s.Lock() defer s.Unlock() diff --git a/daemon/stats.go b/daemon/stats.go index e40788013ff86..a95168d128784 100644 --- a/daemon/stats.go +++ b/daemon/stats.go @@ -2,20 +2,20 @@ package daemon import ( "encoding/json" + "io" "github.com/docker/docker/api/types" "github.com/docker/docker/daemon/execdriver" - "github.com/docker/docker/engine" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" ) -func (daemon *Daemon) ContainerStats(job *engine.Job) error { - updates, err := daemon.SubscribeToContainerStats(job.Args[0]) +func (daemon *Daemon) ContainerStats(name string, out io.Writer) error { + updates, err := daemon.SubscribeToContainerStats(name) if err != nil { return err } - enc := json.NewEncoder(job.Stdout) + enc := json.NewEncoder(out) for v := range updates { update := v.(*execdriver.ResourceStats) ss := convertToAPITypes(update.Stats) @@ -24,7 +24,7 @@ func (daemon *Daemon) ContainerStats(job *engine.Job) error { ss.CpuStats.SystemUsage = update.SystemUsage if err := enc.Encode(ss); err != nil { // TODO: handle the specific broken pipe - daemon.UnsubscribeToContainerStats(job.Args[0], updates) + daemon.UnsubscribeToContainerStats(name, updates) return err } } diff --git a/daemon/stats_collector.go b/daemon/stats_collector.go index 926dd256e4c37..22239743a69d5 100644 --- a/daemon/stats_collector.go +++ b/daemon/stats_collector.go @@ -76,22 +76,42 @@ func (s *statsCollector) unsubscribe(c *Container, ch chan interface{}) { } func (s *statsCollector) run() { - for _ = range time.Tick(s.interval) { + type publishersPair struct { + container *Container + publisher *pubsub.Publisher + } + // we cannot determine the capacity here. 
+ // it will grow enough in first iteration + var pairs []publishersPair + + for range time.Tick(s.interval) { + systemUsage, err := s.getSystemCpuUsage() + if err != nil { + logrus.Errorf("collecting system cpu usage: %v", err) + continue + } + + // it does not make sense in the first iteration, + // but saves allocations in further iterations + pairs = pairs[:0] + + s.m.Lock() for container, publisher := range s.publishers { - systemUsage, err := s.getSystemCpuUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage for %s: %v", container.ID, err) - continue - } - stats, err := container.Stats() + // copy pointers here to release the lock ASAP + pairs = append(pairs, publishersPair{container, publisher}) + } + s.m.Unlock() + + for _, pair := range pairs { + stats, err := pair.container.Stats() if err != nil { if err != execdriver.ErrNotRunning { - logrus.Errorf("collecting stats for %s: %v", container.ID, err) + logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) } continue } stats.SystemUsage = systemUsage - publisher.Publish(stats) + pair.publisher.Publish(stats) } } } diff --git a/daemon/stop.go b/daemon/stop.go index 871683be91921..b481f87efb692 100644 --- a/daemon/stop.go +++ b/daemon/stop.go @@ -1,22 +1,8 @@ package daemon -import ( - "fmt" +import "fmt" - "github.com/docker/docker/engine" -) - -func (daemon *Daemon) ContainerStop(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s CONTAINER\n", job.Name) - } - var ( - name = job.Args[0] - t = 10 - ) - if job.EnvExists("t") { - t = job.GetenvInt("t") - } +func (daemon *Daemon) ContainerStop(name string, seconds int) error { container, err := daemon.Get(name) if err != nil { return err @@ -24,7 +10,7 @@ func (daemon *Daemon) ContainerStop(job *engine.Job) error { if !container.IsRunning() { return fmt.Errorf("Container already stopped") } - if err := container.Stop(int(t)); err != nil { + if err := container.Stop(seconds); err != nil { return 
fmt.Errorf("Cannot stop container %s: %s\n", name, err) } container.LogEvent("stop") diff --git a/daemon/top.go b/daemon/top.go index 1e8c39987c9b4..14b252370588f 100644 --- a/daemon/top.go +++ b/daemon/top.go @@ -6,54 +6,48 @@ import ( "strconv" "strings" - "github.com/docker/docker/engine" + "github.com/docker/docker/api/types" ) -func (daemon *Daemon) ContainerTop(job *engine.Job) error { - if len(job.Args) != 1 && len(job.Args) != 2 { - return fmt.Errorf("Not enough arguments. Usage: %s CONTAINER [PS_ARGS]\n", job.Name) - } - var ( - name = job.Args[0] +func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { + if psArgs == "" { psArgs = "-ef" - ) - - if len(job.Args) == 2 && job.Args[1] != "" { - psArgs = job.Args[1] } container, err := daemon.Get(name) if err != nil { - return err + return nil, err } + if !container.IsRunning() { - return fmt.Errorf("Container %s is not running", name) + return nil, fmt.Errorf("Container %s is not running", name) } + pids, err := daemon.ExecutionDriver().GetPidsForContainer(container.ID) if err != nil { - return err + return nil, err } + output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() if err != nil { - return fmt.Errorf("Error running ps: %s", err) + return nil, fmt.Errorf("Error running ps: %s", err) } + procList := &types.ContainerProcessList{} + lines := strings.Split(string(output), "\n") - header := strings.Fields(lines[0]) - out := &engine.Env{} - out.SetList("Titles", header) + procList.Titles = strings.Fields(lines[0]) pidIndex := -1 - for i, name := range header { + for i, name := range procList.Titles { if name == "PID" { pidIndex = i } } if pidIndex == -1 { - return fmt.Errorf("Couldn't find PID field in ps output") + return nil, fmt.Errorf("Couldn't find PID field in ps output") } - processes := [][]string{} for _, line := range lines[1:] { if len(line) == 0 { continue @@ -61,20 +55,18 @@ func (daemon *Daemon) ContainerTop(job *engine.Job) 
error { fields := strings.Fields(line) p, err := strconv.Atoi(fields[pidIndex]) if err != nil { - return fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) + return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) } for _, pid := range pids { if pid == p { // Make sure number of fields equals number of header titles // merging "overhanging" fields - process := fields[:len(header)-1] - process = append(process, strings.Join(fields[len(header)-1:], " ")) - processes = append(processes, process) + process := fields[:len(procList.Titles)-1] + process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) + procList.Processes = append(procList.Processes, process) } } } - out.SetJson("Processes", processes) - out.WriteTo(job.Stdout) - return nil + return procList, nil } diff --git a/daemon/utils.go b/daemon/utils.go index 6202e6d961677..ec001ca071574 100644 --- a/daemon/utils.go +++ b/daemon/utils.go @@ -42,7 +42,8 @@ func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) ([]string, error) // merge in the lxc conf options into the generic config map if lxcConf := hostConfig.LxcConf; lxcConf != nil { - for _, pair := range lxcConf { + lxSlice := lxcConf.Slice() + for _, pair := range lxSlice { // because lxc conf gets the driver name lxc.XXXX we need to trim it off // and let the lxc driver add it back later if needed if !strings.Contains(pair.Key, ".") { diff --git a/daemon/utils_test.go b/daemon/utils_test.go index ff5b082ba5385..f81843847c86d 100644 --- a/daemon/utils_test.go +++ b/daemon/utils_test.go @@ -4,14 +4,14 @@ import ( "testing" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) func TestMergeLxcConfig(t *testing.T) { + kv := []runconfig.KeyValuePair{ + {"lxc.cgroups.cpuset", "1,2"}, + } hostConfig := &runconfig.HostConfig{ - LxcConf: []utils.KeyValuePair{ - {Key: "lxc.cgroups.cpuset", Value: "1,2"}, - }, + LxcConf: runconfig.NewLxcConfig(kv), } out, err := 
mergeLxcConfIntoOptions(hostConfig) diff --git a/daemon/volumes.go b/daemon/volumes.go index f40fdd3e49df1..ea117a1e3fa60 100644 --- a/daemon/volumes.go +++ b/daemon/volumes.go @@ -2,7 +2,6 @@ package daemon import ( "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -12,31 +11,16 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/volumes" ) -type Mount struct { - MountToPath string - container *Container - volume *volumes.Volume - Writable bool - copyData bool - from *Container - isBind bool -} - -func (mnt *Mount) Export(resource string) (io.ReadCloser, error) { - var name string - if resource == mnt.MountToPath[1:] { - name = filepath.Base(resource) - } - path, err := filepath.Rel(mnt.MountToPath[1:], resource) - if err != nil { - return nil, err - } - return mnt.volume.Export(path, name) +type volumeMount struct { + containerPath string + hostPath string + writable bool + copyData bool + from string } func (container *Container) prepareVolumes() error { @@ -45,70 +29,119 @@ func (container *Container) prepareVolumes() error { container.VolumesRW = make(map[string]bool) } + if len(container.hostConfig.VolumesFrom) > 0 && container.AppliedVolumesFrom == nil { + container.AppliedVolumesFrom = make(map[string]struct{}) + } return container.createVolumes() } -// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order -func (container *Container) sortedVolumeMounts() []string { - var mountPaths []string - for path := range container.Volumes { - mountPaths = append(mountPaths, path) +func (container *Container) createVolumes() error { + mounts := make(map[string]*volumeMount) + + // get the normal volumes + for path := range container.Config.Volumes { + path = filepath.Clean(path) + // skip if there is 
already a volume for this container path + if _, exists := container.Volumes[path]; exists { + continue + } + + realPath, err := container.GetResourcePath(path) + if err != nil { + return err + } + if stat, err := os.Stat(realPath); err == nil { + if !stat.IsDir() { + return fmt.Errorf("can't mount to container path, file exists - %s", path) + } + } + + mnt := &volumeMount{ + containerPath: path, + writable: true, + copyData: true, + } + mounts[mnt.containerPath] = mnt } - sort.Strings(mountPaths) - return mountPaths -} + // Get all the bind mounts + // track bind paths separately due to #10618 + bindPaths := make(map[string]struct{}) + for _, spec := range container.hostConfig.Binds { + mnt, err := parseBindMountSpec(spec) + if err != nil { + return err + } -func (container *Container) createVolumes() error { - mounts, err := container.parseVolumeMountConfig() - if err != nil { - return err + // #10618 + if _, exists := bindPaths[mnt.containerPath]; exists { + return fmt.Errorf("Duplicate volume mount %s", mnt.containerPath) + } + + bindPaths[mnt.containerPath] = struct{}{} + mounts[mnt.containerPath] = mnt } - for _, mnt := range mounts { - if err := mnt.initialize(); err != nil { + // Get volumes from + for _, from := range container.hostConfig.VolumesFrom { + cID, mode, err := parseVolumesFromSpec(from) + if err != nil { return err } + if _, exists := container.AppliedVolumesFrom[cID]; exists { + // skip since it's already been applied + continue + } + + c, err := container.daemon.Get(cID) + if err != nil { + return fmt.Errorf("container %s not found, impossible to mount its volumes", cID) + } + + for _, mnt := range c.volumeMounts() { + mnt.writable = mnt.writable && (mode == "rw") + mnt.from = cID + mounts[mnt.containerPath] = mnt + } } - // On every start, this will apply any new `VolumesFrom` entries passed in via HostConfig, which may override volumes set in `create` - return container.applyVolumesFrom() -} + for _, mnt := range mounts { + 
containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, mnt.containerPath), container.basefs) + if err != nil { + return err + } -func (m *Mount) initialize() error { - // No need to initialize anything since it's already been initialized - if hostPath, exists := m.container.Volumes[m.MountToPath]; exists { - // If this is a bind-mount/volumes-from, maybe it was passed in at start instead of create - // We need to make sure bind-mounts/volumes-from passed on start can override existing ones. - if (!m.volume.IsBindMount && !m.isBind) && m.from == nil { - return nil + // Create the actual volume + v, err := container.daemon.volumes.FindOrCreateVolume(mnt.hostPath, mnt.writable) + if err != nil { + return err } - if m.volume.Path == hostPath { - return nil + + container.VolumesRW[mnt.containerPath] = mnt.writable + container.Volumes[mnt.containerPath] = v.Path + v.AddContainer(container.ID) + if mnt.from != "" { + container.AppliedVolumesFrom[mnt.from] = struct{}{} } - // Make sure we remove these old volumes we don't actually want now. 
- // Ignore any errors here since this is just cleanup, maybe someone volumes-from'd this volume - if v := m.container.daemon.volumes.Get(hostPath); v != nil { - v.RemoveContainer(m.container.ID) - m.container.daemon.volumes.Delete(v.Path) + if mnt.writable && mnt.copyData { + // Copy whatever is in the container at the containerPath to the volume + copyExistingContents(containerMntPath, v.Path) } } - // This is the full path to container fs + mntToPath - containerMntPath, err := symlink.FollowSymlinkInScope(filepath.Join(m.container.basefs, m.MountToPath), m.container.basefs) - if err != nil { - return err - } - m.container.VolumesRW[m.MountToPath] = m.Writable - m.container.Volumes[m.MountToPath] = m.volume.Path - m.volume.AddContainer(m.container.ID) - if m.Writable && m.copyData { - // Copy whatever is in the container at the mntToPath to the volume - copyExistingContents(containerMntPath, m.volume.Path) + return nil +} + +// sortedVolumeMounts returns the list of container volume mount points sorted in lexicographic order +func (container *Container) sortedVolumeMounts() []string { + var mountPaths []string + for path := range container.Volumes { + mountPaths = append(mountPaths, path) } - return nil + sort.Strings(mountPaths) + return mountPaths } func (container *Container) VolumePaths() map[string]struct{} { @@ -151,97 +184,30 @@ func (container *Container) derefVolumes() { } } -func (container *Container) parseVolumeMountConfig() (map[string]*Mount, error) { - var mounts = make(map[string]*Mount) - // Get all the bind mounts - for _, spec := range container.hostConfig.Binds { - path, mountToPath, writable, err := parseBindMountSpec(spec) - if err != nil { - return nil, err - } - // Check if a bind mount has already been specified for the same container path - if m, exists := mounts[mountToPath]; exists { - return nil, fmt.Errorf("Duplicate volume %q: %q already in use, mounted from %q", path, mountToPath, m.volume.Path) - } - // Check if a volume already 
exists for this and use it - vol, err := container.daemon.volumes.FindOrCreateVolume(path, writable) - if err != nil { - return nil, err - } - mounts[mountToPath] = &Mount{ - container: container, - volume: vol, - MountToPath: mountToPath, - Writable: writable, - isBind: true, // in case the volume itself is a normal volume, but is being mounted in as a bindmount here - } - } - - // Get the rest of the volumes - for path := range container.Config.Volumes { - // Check if this is already added as a bind-mount - path = filepath.Clean(path) - if _, exists := mounts[path]; exists { - continue - } - - // Check if this has already been created - if _, exists := container.Volumes[path]; exists { - continue - } - realPath, err := container.getResourcePath(path) - if err != nil { - return nil, fmt.Errorf("failed to evaluate the absolute path of symlink") - } - if stat, err := os.Stat(realPath); err == nil { - if !stat.IsDir() { - return nil, fmt.Errorf("file exists at %s, can't create volume there", realPath) - } - } - - vol, err := container.daemon.volumes.FindOrCreateVolume("", true) - if err != nil { - return nil, err - } - mounts[path] = &Mount{ - container: container, - MountToPath: path, - volume: vol, - Writable: true, - copyData: true, - } - } - - return mounts, nil -} - -func parseBindMountSpec(spec string) (string, string, bool, error) { - var ( - path, mountToPath string - writable bool - arr = strings.Split(spec, ":") - ) +func parseBindMountSpec(spec string) (*volumeMount, error) { + arr := strings.Split(spec, ":") + mnt := &volumeMount{} switch len(arr) { case 2: - path = arr[0] - mountToPath = arr[1] - writable = true + mnt.hostPath = arr[0] + mnt.containerPath = arr[1] + mnt.writable = true case 3: - path = arr[0] - mountToPath = arr[1] - writable = validMountMode(arr[2]) && arr[2] == "rw" + mnt.hostPath = arr[0] + mnt.containerPath = arr[1] + mnt.writable = validMountMode(arr[2]) && arr[2] == "rw" default: - return "", "", false, fmt.Errorf("Invalid volume 
specification: %s", spec) + return nil, fmt.Errorf("Invalid volume specification: %s", spec) } - if !filepath.IsAbs(path) { - return "", "", false, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", path) + if !filepath.IsAbs(mnt.hostPath) { + return nil, fmt.Errorf("cannot bind mount volume: %s volume paths must be absolute.", mnt.hostPath) } - path = filepath.Clean(path) - mountToPath = filepath.Clean(mountToPath) - return path, mountToPath, writable, nil + mnt.hostPath = filepath.Clean(mnt.hostPath) + mnt.containerPath = filepath.Clean(mnt.containerPath) + return mnt, nil } func parseVolumesFromSpec(spec string) (string, string, error) { @@ -263,54 +229,6 @@ func parseVolumesFromSpec(spec string) (string, string, error) { return id, mode, nil } -func (container *Container) applyVolumesFrom() error { - volumesFrom := container.hostConfig.VolumesFrom - if len(volumesFrom) > 0 && container.AppliedVolumesFrom == nil { - container.AppliedVolumesFrom = make(map[string]struct{}) - } - - mountGroups := make(map[string][]*Mount) - - for _, spec := range volumesFrom { - id, mode, err := parseVolumesFromSpec(spec) - if err != nil { - return err - } - if _, exists := container.AppliedVolumesFrom[id]; exists { - // Don't try to apply these since they've already been applied - continue - } - - c, err := container.daemon.Get(id) - if err != nil { - return fmt.Errorf("Could not apply volumes of non-existent container %q.", id) - } - - var ( - fromMounts = c.VolumeMounts() - mounts []*Mount - ) - - for _, mnt := range fromMounts { - mnt.Writable = mnt.Writable && (mode == "rw") - mounts = append(mounts, mnt) - } - mountGroups[id] = mounts - } - - for id, mounts := range mountGroups { - for _, mnt := range mounts { - mnt.from = mnt.container - mnt.container = container - if err := mnt.initialize(); err != nil { - return err - } - } - container.AppliedVolumesFrom[id] = struct{}{} - } - return nil -} - func validMountMode(mode string) bool { validModes := 
map[string]bool{ "rw": true, @@ -320,6 +238,20 @@ func validMountMode(mode string) bool { return validModes[mode] } +func (container *Container) specialMounts() []execdriver.Mount { + var mounts []execdriver.Mount + if container.ResolvConfPath != "" { + mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}) + } + if container.HostnamePath != "" { + mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) + } + if container.HostsPath != "" { + mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) + } + return mounts +} + func (container *Container) setupMounts() error { mounts := []execdriver.Mount{} @@ -336,29 +268,23 @@ func (container *Container) setupMounts() error { }) } - if container.ResolvConfPath != "" { - mounts = append(mounts, execdriver.Mount{Source: container.ResolvConfPath, Destination: "/etc/resolv.conf", Writable: true, Private: true}) - } - - if container.HostnamePath != "" { - mounts = append(mounts, execdriver.Mount{Source: container.HostnamePath, Destination: "/etc/hostname", Writable: true, Private: true}) - } - - if container.HostsPath != "" { - mounts = append(mounts, execdriver.Mount{Source: container.HostsPath, Destination: "/etc/hosts", Writable: true, Private: true}) - } + mounts = append(mounts, container.specialMounts()...) 
container.command.Mounts = mounts return nil } -func (container *Container) VolumeMounts() map[string]*Mount { - mounts := make(map[string]*Mount) +func (container *Container) volumeMounts() map[string]*volumeMount { + mounts := make(map[string]*volumeMount) - for mountToPath, path := range container.Volumes { - if v := container.daemon.volumes.Get(path); v != nil { - mounts[mountToPath] = &Mount{volume: v, container: container, MountToPath: mountToPath, Writable: container.VolumesRW[mountToPath]} + for containerPath, path := range container.Volumes { + v := container.daemon.volumes.Get(path) + if v == nil { + // This should never happen + logrus.Debugf("reference by container %s to non-existent volume path %s", container.ID, path) + continue } + mounts[containerPath] = &volumeMount{hostPath: path, containerPath: containerPath, writable: container.VolumesRW[containerPath]} } return mounts @@ -387,17 +313,56 @@ func copyExistingContents(source, destination string) error { return copyOwnership(source, destination) } -// copyOwnership copies the permissions and uid:gid of the source file -// into the destination file -func copyOwnership(source, destination string) error { - stat, err := system.Stat(source) - if err != nil { - return err +func (container *Container) mountVolumes() error { + for dest, source := range container.Volumes { + v := container.daemon.volumes.Get(source) + if v == nil { + return fmt.Errorf("could not find volume for %s:%s, impossible to mount", source, dest) + } + + destPath, err := container.GetResourcePath(dest) + if err != nil { + return err + } + + if err := mount.Mount(source, destPath, "bind", "rbind,rw"); err != nil { + return fmt.Errorf("error while mounting volume %s: %v", source, err) + } } - if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil { - return err + for _, mnt := range container.specialMounts() { + destPath, err := container.GetResourcePath(mnt.Destination) + if err != nil { + return err + } + if 
err := mount.Mount(mnt.Source, destPath, "bind", "bind,rw"); err != nil { + return fmt.Errorf("error while mounting volume %s: %v", mnt.Source, err) + } } + return nil +} - return os.Chmod(destination, os.FileMode(stat.Mode())) +func (container *Container) unmountVolumes() { + for dest := range container.Volumes { + destPath, err := container.GetResourcePath(dest) + if err != nil { + logrus.Errorf("error while unmounting volumes %s: %v", destPath, err) + continue + } + if err := mount.ForceUnmount(destPath); err != nil { + logrus.Errorf("error while unmounting volumes %s: %v", destPath, err) + continue + } + } + + for _, mnt := range container.specialMounts() { + destPath, err := container.GetResourcePath(mnt.Destination) + if err != nil { + logrus.Errorf("error while unmounting volumes %s: %v", destPath, err) + continue + } + if err := mount.ForceUnmount(destPath); err != nil { + logrus.Errorf("error while unmounting volumes %s: %v", destPath, err) + } + } } diff --git a/daemon/volumes_linux.go b/daemon/volumes_linux.go new file mode 100644 index 0000000000000..93fea816598a2 --- /dev/null +++ b/daemon/volumes_linux.go @@ -0,0 +1,24 @@ +// +build !windows + +package daemon + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// copyOwnership copies the permissions and uid:gid of the source file +// into the destination file +func copyOwnership(source, destination string) error { + stat, err := system.Stat(source) + if err != nil { + return err + } + + if err := os.Chown(destination, int(stat.Uid()), int(stat.Gid())); err != nil { + return err + } + + return os.Chmod(destination, os.FileMode(stat.Mode())) +} diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go new file mode 100644 index 0000000000000..ca1199a542d15 --- /dev/null +++ b/daemon/volumes_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package daemon + +// Not supported on Windows +func copyOwnership(source, destination string) error { + return nil +} diff --git 
a/daemon/wait.go b/daemon/wait.go deleted file mode 100644 index 5c1f44beb34f6..0000000000000 --- a/daemon/wait.go +++ /dev/null @@ -1,22 +0,0 @@ -package daemon - -import ( - "fmt" - "time" - - "github.com/docker/docker/engine" -) - -func (daemon *Daemon) ContainerWait(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("Usage: %s", job.Name) - } - name := job.Args[0] - container, err := daemon.Get(name) - if err != nil { - return fmt.Errorf("%s: %v", job.Name, err) - } - status, _ := container.WaitStop(-1 * time.Second) - job.Printf("%d\n", status) - return nil -} diff --git a/docker/daemon.go b/docker/daemon.go index b1a92c52e592b..c6241b60602fb 100644 --- a/docker/daemon.go +++ b/docker/daemon.go @@ -7,12 +7,10 @@ import ( "io" "os" "path/filepath" - "strings" "github.com/Sirupsen/logrus" + apiserver "github.com/docker/docker/api/server" "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/builder" - "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" _ "github.com/docker/docker/daemon/execdriver/lxc" _ "github.com/docker/docker/daemon/execdriver/native" @@ -93,67 +91,27 @@ func mainDaemon() { } daemonCfg.TrustKeyPath = *flTrustKey - // Load builtins - if err := builtins.Register(eng); err != nil { - logrus.Fatal(err) + serverConfig := &apiserver.ServerConfig{ + Logging: true, + EnableCors: daemonCfg.EnableCors, + CorsHeaders: daemonCfg.CorsHeaders, + Version: dockerversion.VERSION, + SocketGroup: daemonCfg.SocketGroup, + Tls: *flTls, + TlsVerify: *flTlsVerify, + TlsCa: *flCa, + TlsCert: *flCert, + TlsKey: *flKey, } - registryService := registry.NewService(registryCfg) - // load the daemon in the background so we can immediately start - // the http api so that connections don't fail while the daemon - // is booting - daemonInitWait := make(chan error) - go func() { - d, err := daemon.NewDaemon(daemonCfg, eng, registryService) - if err != nil { - daemonInitWait <- err - return - } - - 
logrus.WithFields(logrus.Fields{ - "version": dockerversion.VERSION, - "commit": dockerversion.GITCOMMIT, - "execdriver": d.ExecutionDriver().Name(), - "graphdriver": d.GraphDriver().String(), - }).Info("Docker daemon") - - if err := d.Install(eng); err != nil { - daemonInitWait <- err - return - } - - b := &builder.BuilderJob{eng, d} - b.Install() - - // after the daemon is done setting up we can tell the api to start - // accepting connections - if err := eng.Job("acceptconnections").Run(); err != nil { - daemonInitWait <- err - return - } - daemonInitWait <- nil - }() + api := apiserver.New(serverConfig, eng) - // Serve api - job := eng.Job("serveapi", flHosts...) - job.SetenvBool("Logging", true) - job.SetenvBool("EnableCors", daemonCfg.EnableCors) - job.Setenv("CorsHeaders", daemonCfg.CorsHeaders) - job.Setenv("Version", dockerversion.VERSION) - job.Setenv("SocketGroup", daemonCfg.SocketGroup) - - job.SetenvBool("Tls", *flTls) - job.SetenvBool("TlsVerify", *flTlsVerify) - job.Setenv("TlsCa", *flCa) - job.Setenv("TlsCert", *flCert) - job.Setenv("TlsKey", *flKey) - - // The serve API job never exits unless an error occurs + // The serve API routine never exits unless an error occurs // We need to start it as a goroutine and wait on it so // daemon doesn't exit serveAPIWait := make(chan error) go func() { - if err := job.Run(); err != nil { + if err := api.ServeApi(flHosts); err != nil { logrus.Errorf("ServeAPI error: %v", err) serveAPIWait <- err return @@ -161,35 +119,38 @@ func mainDaemon() { serveAPIWait <- nil }() - // Wait for the daemon startup goroutine to finish - // This makes sure we can actually cleanly shutdown the daemon - logrus.Debug("waiting for daemon to initialize") - errDaemon := <-daemonInitWait - if errDaemon != nil { + registryService := registry.NewService(registryCfg) + d, err := daemon.NewDaemon(daemonCfg, eng, registryService) + if err != nil { eng.Shutdown() - outStr := fmt.Sprintf("Shutting down daemon due to errors: %v", errDaemon) - 
if strings.Contains(errDaemon.Error(), "engine is shutdown") { - // if the error is "engine is shutdown", we've already reported (or - // will report below in API server errors) the error - outStr = "Shutting down daemon due to reported errors" - } - // we must "fatal" exit here as the API server may be happy to - // continue listening forever if the error had no impact to API - logrus.Fatal(outStr) - } else { - logrus.Info("Daemon has completed initialization") + logrus.Fatalf("Error starting daemon: %v", err) + } + + if err := d.Install(eng); err != nil { + eng.Shutdown() + logrus.Fatalf("Error starting daemon: %v", err) } + logrus.Info("Daemon has completed initialization") + + logrus.WithFields(logrus.Fields{ + "version": dockerversion.VERSION, + "commit": dockerversion.GITCOMMIT, + "execdriver": d.ExecutionDriver().Name(), + "graphdriver": d.GraphDriver().String(), + }).Info("Docker daemon") + + // after the daemon is done setting up we can tell the api to start + // accepting connections with specified daemon + api.AcceptConnections(d) + // Daemon is fully initialized and handling API traffic // Wait for serve API job to complete errAPI := <-serveAPIWait - // If we have an error here it is unique to API (as daemonErr would have - // exited the daemon process above) eng.Shutdown() if errAPI != nil { logrus.Fatalf("Shutting down due to ServeAPI error: %v", errAPI) } - } // currentUserIsOwner checks whether the current user is the owner of the given diff --git a/docker/docker.go b/docker/docker.go index c9b2c77b02c2f..1096b840f8468 100644 --- a/docker/docker.go +++ b/docker/docker.go @@ -9,13 +9,12 @@ import ( "strings" "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" "github.com/docker/docker/api/client" "github.com/docker/docker/autogen/dockerversion" + "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" ) const ( @@ 
-64,9 +63,9 @@ func main() { defaultHost := os.Getenv("DOCKER_HOST") if defaultHost == "" || *flDaemon { // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", api.DEFAULTUNIXSOCKET) + defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) } - defaultHost, err := api.ValidateHost(defaultHost) + defaultHost, err := opts.ValidateHost(defaultHost) if err != nil { logrus.Fatal(err) } @@ -136,7 +135,7 @@ func main() { } if err := cli.Cmd(flag.Args()...); err != nil { - if sterr, ok := err.(*utils.StatusError); ok { + if sterr, ok := err.(client.StatusError); ok { if sterr.Status != "" { logrus.Println(sterr.Status) } diff --git a/docker/flags.go b/docker/flags.go index 7f0c10d2d3dbd..969f3571be53e 100644 --- a/docker/flags.go +++ b/docker/flags.go @@ -78,6 +78,7 @@ func init() { for _, command := range [][]string{ {"attach", "Attach to a running container"}, {"build", "Build an image from a Dockerfile"}, + {"checkpoint", "Checkpoint one or more running containers"}, {"commit", "Create a new image from a container's changes"}, {"cp", "Copy files/folders from a container's filesystem to the host path"}, {"create", "Create a new container"}, @@ -102,6 +103,7 @@ func init() { {"push", "Push an image or a repository to a Docker registry server"}, {"rename", "Rename an existing container"}, {"restart", "Restart a running container"}, + {"restore", "Restore one or more checkpointed containers"}, {"rm", "Remove one or more containers"}, {"rmi", "Remove one or more images"}, {"run", "Run a command in a new container"}, @@ -116,7 +118,7 @@ func init() { {"version", "Show the Docker version information"}, {"wait", "Block until a container stops, then print its exit code"}, } { - help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1]) + help += fmt.Sprintf(" %-11.11s%s\n", command[0], command[1]) } help += "\nRun 'docker COMMAND --help' for more information on a command." 
fmt.Fprintf(os.Stdout, "%s\n", help) diff --git a/docs/Dockerfile b/docs/Dockerfile index 7914abf38007a..e30d4bbd540d9 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -7,10 +7,9 @@ MAINTAINER Sven Dowideit (@SvenDowideit) # This section ensures we pull the correct version of each # sub project ENV COMPOSE_BRANCH release -ENV SWARM_BRANCH v0.1.0 -ENV MACHINE_BRANCH v0.1.0 -ENV DISTRIB_BRANCH master - +ENV SWARM_BRANCH v0.2.0 +ENV MACHINE_BRANCH master +ENV DISTRIB_BRANCH release/2.0 # TODO: need the full repo source to get the git version info @@ -28,64 +27,87 @@ COPY ./VERSION VERSION #COPY ./image/spec/v1.md /docs/sources/reference/image-spec-v1.md # TODO: don't do this - look at merging the yml file in build.sh -COPY ./mkdocs.yml mkdocs.yml -COPY ./s3_website.json s3_website.json -COPY ./release.sh release.sh - +COPY ./mkdocs.yml ./s3_website.json ./release.sh ./ +####################### # Docker Distribution -# -#ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/mkdocs.yml /docs/mkdocs-distribution.yml - -#ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/overview.md /docs/sources/distribution/overview.md -#RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/distribution/overview.md - -#ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/install.md /docs/sources/distribution/install.md -#RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/distribution/install.md - -#ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/architecture.md /docs/sources/distribution/architecture.md -#RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/distribution/architecture.md +######################## +#ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/mkdocs.yml /docs/mkdocs-distribution.yml +ADD 
https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/notifications.png \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/images/registry.png \ + /docs/sources/registry/images/ + +ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/index.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/deploying.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/configuration.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storagedrivers.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/notifications.md \ + /docs/sources/registry/ + +ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/api.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/json.md \ + /docs/sources/registry/spec/ + +ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/s3.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/azure.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/filesystem.md \ + https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/storage-drivers/inmemory.md \ + /docs/sources/registry/storage-drivers/ + +ADD https://raw.githubusercontent.com/docker/distribution/${DISTRIB_BRANCH}/docs/spec/auth/token.md /docs/sources/registry/spec/auth/token.md + +RUN sed -i.old '1s;^;no_version_dropdown: true;' \ + /docs/sources/registry/*.md \ + /docs/sources/registry/spec/*.md \ + /docs/sources/registry/spec/auth/*.md + +####################### # Docker Swarm +####################### + #ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/mkdocs.yml /docs/mkdocs-swarm.yml ADD 
https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/docs/index.md /docs/sources/swarm/index.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/index.md + ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/discovery/README.md /docs/sources/swarm/discovery.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/discovery.md + ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/api/README.md /docs/sources/swarm/API.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/API.md + ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/filter/README.md /docs/sources/swarm/scheduler/filter.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/filter.md + ADD https://raw.githubusercontent.com/docker/swarm/${SWARM_BRANCH}/scheduler/strategy/README.md /docs/sources/swarm/scheduler/strategy.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/scheduler/strategy.md +RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/swarm/*.md /docs/sources/swarm/scheduler/*.md + +####################### # Docker Machine +####################### #ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-machine.yml + ADD https://raw.githubusercontent.com/docker/machine/${MACHINE_BRANCH}/docs/index.md /docs/sources/machine/index.md RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/machine/index.md +####################### # Docker Compose +####################### + #ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/mkdocs.yml /docs/mkdocs-compose.yml -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/index.md /docs/sources/compose/index.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/index.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/install.md 
/docs/sources/compose/install.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/install.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/cli.md /docs/sources/compose/cli.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/cli.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/yml.md /docs/sources/compose/yml.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/yml.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/env.md /docs/sources/compose/env.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/env.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/completion.md /docs/sources/compose/completion.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/completion.md - -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/django.md /docs/sources/compose/django.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/django.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/rails.md /docs/sources/compose/rails.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/rails.md -ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/wordpress.md /docs/sources/compose/wordpress.md -RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/wordpress.md + +ADD https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/index.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/install.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/cli.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/yml.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/env.md \ + 
https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/completion.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/django.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/rails.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/wordpress.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/extends.md \ + https://raw.githubusercontent.com/docker/compose/${COMPOSE_BRANCH}/docs/production.md \ + /docs/sources/compose/ + +RUN sed -i.old '1s;^;no_version_dropdown: true;' /docs/sources/compose/*.md # Then build everything together, ready for mkdocs RUN /docs/build.sh diff --git a/docs/README.md b/docs/README.md index 15fee1d364699..8ff25adab75e8 100755 --- a/docs/README.md +++ b/docs/README.md @@ -3,7 +3,7 @@ The source for Docker documentation is in this directory under `sources/`. Our documentation uses extended Markdown, as implemented by [MkDocs](http://mkdocs.org). The current release of the Docker documentation -resides on [http://docs.docker.com](http://docs.docker.com). +resides on [https://docs.docker.com](https://docs.docker.com). 
## Understanding the documentation branches and processes @@ -11,7 +11,7 @@ Docker has two primary branches for documentation: | Branch | Description | URL (published via commit-hook) | |----------|--------------------------------|------------------------------------------------------------------------------| -| `docs` | Official release documentation | [http://docs.docker.com](http://docs.docker.com) | +| `docs` | Official release documentation | [https://docs.docker.com](https://docs.docker.com) | | `master` | Merged but unreleased development work | [http://docs.master.dockerproject.com](http://docs.master.dockerproject.com) | Additions and updates to upcoming releases are made in a feature branch off of diff --git a/docs/man/README.md b/docs/man/README.md index 402178a9c2cda..e25a925adb716 100644 --- a/docs/man/README.md +++ b/docs/man/README.md @@ -30,4 +30,4 @@ The `md2man` Docker container will process the Markdown files and generate the man pages inside the `docker/docs/man/man1` directory using Docker volumes. For more information on Docker volumes see the man page for `docker run` and also look at the article [Sharing Directories via Volumes] -(http://docs.docker.com/use/working_with_volumes/). +(https://docs.docker.com/use/working_with_volumes/). diff --git a/docs/man/docker-build.1.md b/docs/man/docker-build.1.md index fe6250fc195dc..4a8eba67dfe1f 100644 --- a/docs/man/docker-build.1.md +++ b/docs/man/docker-build.1.md @@ -17,6 +17,7 @@ docker-build - Build a new image from the source code at PATH [**-m**|**--memory**[=*MEMORY*]] [**--memory-swap**[=*MEMORY-SWAP*]] [**-c**|**--cpu-shares**[=*0*]] +[**--cpu-quota**[=*0*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] PATH | URL | - diff --git a/docs/man/docker-commit.1.md b/docs/man/docker-commit.1.md index 003cb6f69f0e7..5a290682d09f9 100644 --- a/docs/man/docker-commit.1.md +++ b/docs/man/docker-commit.1.md @@ -22,7 +22,7 @@ Using an existing container's name or ID you can create a new image. 
**-c** , **--change**=[] Apply specified Dockerfile instructions while committing the image - Supported Dockerfile instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY` + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` **--help** Print usage statement diff --git a/docs/man/docker-create.1.md b/docs/man/docker-create.1.md index 1a0da1b8f4731..7aba222b298e9 100644 --- a/docs/man/docker-create.1.md +++ b/docs/man/docker-create.1.md @@ -13,6 +13,8 @@ docker-create - Create a new container [**--cap-drop**[=*[]*]] [**--cidfile**[=*CIDFILE*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] +[**--cpu-quota**[=*0*]] [**--device**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns**[=*[]*]] @@ -74,6 +76,16 @@ IMAGE [COMMAND] [ARG...] **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**-cpu-quota**=0 + Limit the CPU CFS (Completely Fair Scheduler) quota + **--device**=[] Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) @@ -121,7 +133,7 @@ IMAGE [COMMAND] [ARG...] **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**--log-driver**="|*json-file*|*syslog*|*none*" +**--log-driver**="|*json-file*|*syslog*|*journald*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: `docker logs` command works only for `json-file` logging driver. 
diff --git a/docs/man/docker-exec.1.md b/docs/man/docker-exec.1.md index e7554419ecfe0..312fa397f5be9 100644 --- a/docs/man/docker-exec.1.md +++ b/docs/man/docker-exec.1.md @@ -9,7 +9,9 @@ docker-exec - Run a command in a running container [**-d**|**--detach**[=*false*]] [**--help**] [**-i**|**--interactive**[=*false*]] +[**--privileged**[=*false*]] [**-t**|**--tty**[=*false*]] +[**-u**|**--user**[=*USER*]] CONTAINER COMMAND [ARG...] # DESCRIPTION @@ -32,9 +34,24 @@ container is unpaused, and then run **-i**, **--interactive**=*true*|*false* Keep STDIN open even if not attached. The default is *false*. +**--privileged**=*true*|*false* + Give extended privileges to the process to run in a running container. The default is *false*. + + By default, the process run by docker exec in a running container +have the same capabilities of the container. By setting --privileged will give +all the capabilities to the process. + **-t**, **--tty**=*true*|*false* Allocate a pseudo-TTY. The default is *false*. +**-u**, **--user**="" + Sets the username or UID used and optionally the groupname or GID for the specified command. + + The followings examples are all valid: + --user [user | user:group | uid | uid:gid | user:gid | uid:group ] + + Without this argument the command will be run as root in the container. + The **-t** option is incompatible with a redirection of the docker client standard input. diff --git a/docs/man/docker-history.1.md b/docs/man/docker-history.1.md index 24f928c291899..268e378d0609a 100644 --- a/docs/man/docker-history.1.md +++ b/docs/man/docker-history.1.md @@ -19,6 +19,9 @@ Show the history of when and how an image was created. **--help** Print usage statement +**-H**. **--human**=*true*|*false* + Print sizes and dates in human readable format. The default is *true*. + **--no-trunc**=*true*|*false* Don't truncate output. The default is *false*. @@ -27,10 +30,20 @@ Show the history of when and how an image was created. 
# EXAMPLES $ docker history fedora - IMAGE CREATED CREATED BY SIZE + IMAGE CREATED CREATED BY SIZE COMMENT 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 10 months ago 0 B + 511136ea3c5a 10 months ago 0 B Imported from - + +## Display comments in the image history +The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. + + $ sudo docker history docker:scm + IMAGE CREATED CREATED BY SIZE COMMENT + 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image + 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB + c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B + 511136ea3c5a 19 months ago 0 B Imported from - # HISTORY April 2014, Originally compiled by William Henry (whenry at redhat dot com) diff --git a/docs/man/docker-import.1.md b/docs/man/docker-import.1.md index 6b3899b6a7153..b45bf5d4c6bbf 100644 --- a/docs/man/docker-import.1.md +++ b/docs/man/docker-import.1.md @@ -13,7 +13,7 @@ URL|- [REPOSITORY[:TAG]] # OPTIONS **-c**, **--change**=[] Apply specified Dockerfile instructions while importing the image - Supported Dockerfile instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY` + Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` # DESCRIPTION Create a new filesystem image from the contents of a tarball (`.tar`, diff --git a/docs/man/docker-inspect.1.md b/docs/man/docker-inspect.1.md index 85f6730004210..6f3cf51221b20 100644 --- a/docs/man/docker-inspect.1.md +++ b/docs/man/docker-inspect.1.md @@ -19,80 +19,120 @@ each result. # OPTIONS **--help** - Print usage statement + Print usage statement **-f**, **--format**="" - Format the output using the given go template. 
+ Format the output using the given go template. # EXAMPLES ## Getting information on a container -To get information on a container use it's ID or instance name: +To get information on a container use its ID or instance name: - #docker inspect 1eb5fabf5a03 + $ docker inspect 1eb5fabf5a03 [{ - "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", - "Created": "2014-04-04T21:33:52.02361335Z", - "Path": "/usr/sbin/nginx", - "Args": [], - "Config": { - "Hostname": "1eb5fabf5a03", - "Domainname": "", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, + "AppArmorProfile": "", + "Args": [], + "Config": { + "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, - "AttachStderr": false, - "PortSpecs": null, - "ExposedPorts": { - "80/tcp": {} - }, - "Tty": true, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], "Cmd": [ "/usr/sbin/nginx" ], - "Dns": null, - "DnsSearch": null, - "Image": "summit/nginx", - "Volumes": null, - "VolumesFrom": "", - "WorkingDir": "", + "Domainname": "", "Entrypoint": null, + "Env": [ + "HOME=/", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "ExposedPorts": { + "80/tcp": {} + }, + "Hostname": "1eb5fabf5a03", + "Image": "summit/nginx", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "MacAddress": "", "NetworkDisabled": false, "OnBuild": null, - "Context": { - "mount_label": "system_u:object_r:svirt_sandbox_file_t:s0:c0,c650", - "process_label": "system_u:system_r:svirt_lxc_net_t:s0:c0,c650" - } - }, - "State": { - "Running": true, - "Pid": 858, - "ExitCode": 0, - "StartedAt": "2014-04-04T21:33:54.16259207Z", - "FinishedAt": "0001-01-01T00:00:00Z", - "Ghost": false + "OpenStdin": false, + "PortSpecs": null, + "StdinOnce": false, + "Tty": true, + "User": "", + "Volumes": null, + "WorkingDir": "", }, + "Created": 
"2014-04-04T21:33:52.02361335Z", + "Driver": "devicemapper", + "ExecDriver": "native-0.1", + "ExecIDs": null, + "HostConfig": { + "Binds": null, + "CapAdd": null, + "CapDrop": null, + "CgroupParent": "", + "ContainerIDFile": "", + "CpuShares": 512, + "CpusetCpus": "0,1", + "CpusetMems": "", + "Devices": [], + "Dns": null, + "DnsSearch": null, + "ExtraHosts": null, + "IpcMode": "", + "Links": null, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, + "LxcConf": null, + "Memory": 16777216, + "MemorySwap": -1, + "NetworkMode": "", + "PidMode": "", + "PortBindings": { + "80/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "80" + } + ] + }, + "Privileged": false, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "RestartPolicy": { + "MaximumRetryCount": 0, + "Name": "" + }, + "SecurityOpt": null, + "Ulimits": null, + "VolumesFrom": null + } + "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", + "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", + "ID": "1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b", "Image": "df53773a4390e25936f9fd3739e0c0e60a62d024ea7b669282b27e65ae8458e6", - "Labels": { - "com.example.vendor": "Acme", - "com.example.license": "GPL", - "com.example.version": "1.0" - }, + "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", + "MountLabel": "", + "Name": "/ecstatic_ptolemy", "NetworkSettings": { + "Bridge": "docker0", + "Gateway": "172.17.42.1", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, "IPAddress": "172.17.0.2", "IPPrefixLen": 16, - "Gateway": "172.17.42.1", - "Bridge": "docker0", + "IPv6Gateway": "", + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "MacAddress": "", "PortMapping": null, "Ports": { "80/tcp": [ @@ -103,41 +143,31 @@ To 
get information on a container use it's ID or instance name: ] } }, + "Path": "/usr/sbin/nginx", + "ProcessLabel": "", "ResolvConfPath": "/etc/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hostname", - "HostsPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/hosts", - "LogPath": "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log", - "Name": "/ecstatic_ptolemy", - "Driver": "devicemapper", - "ExecDriver": "native-0.1", + "RestartCount": 0, + "State": { + "Dead": false, + "Error": "", + "ExitCode": 0, + "FinishedAt": "0001-01-01T00:00:00Z", + "OOMKilled": false, + "Paused": false, + "Pid": 858, + "Restarting": false, + "Running": true, + "StartedAt": "2014-04-04T21:33:54.16259207Z", + }, "Volumes": {}, "VolumesRW": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "80" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "DriverOptions": { - "lxc": null - }, - "CliAddress": "" - } + } ## Getting the IP address of a container instance To get the IP address of a container use: - # docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 + $ docker inspect --format='{{.NetworkSettings.IPAddress}}' 1eb5fabf5a03 172.17.0.2 ## Listing all port bindings @@ -145,95 +175,96 @@ To get the IP address of a container use: One can loop over arrays and maps in the results to produce simple text output: - # docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ - {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 + $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ + {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' 1eb5fabf5a03 + 80/tcp -> 80 - 
80/tcp -> 80 +You can get more information about how to write a go template from: +http://golang.org/pkg/text/template/. ## Getting information on an image Use an image's ID or name (e.g., repository/name[:tag]) to get information - on it. +on it. - # docker inspect 58394af37342 + $ docker inspect fc1203419df2 [{ - "id": "58394af373423902a1b97f209a31e3777932d9321ef10e64feaaa7b4df609cf9", - "parent": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", - "created": "2014-02-03T16:10:40.500814677Z", - "container": "f718f19a28a5147da49313c54620306243734bafa63c76942ef6f8c4b4113bc5", - "container_config": { - "Hostname": "88807319f25e", - "Domainname": "", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, + "Architecture": "amd64", + "Author": "", + "Comment": "", + "Config": { + "AttachStderr": false, "AttachStdin": false, "AttachStdout": false, - "AttachStderr": false, - "PortSpecs": null, + "Cmd": [ + "make", + "direct-test" + ], + "Domainname": "", + "Entrypoint": [ + "/dind" + ], + "Env": [ + "PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + ], "ExposedPorts": null, - "Tty": false, + "Hostname": "242978536a06", + "Image": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a", + "Labels": {}, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": [], "OpenStdin": false, + "PortSpecs": null, "StdinOnce": false, - "Env": [ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], + "Tty": false, + "User": "", + "Volumes": null, + "WorkingDir": "/go/src/github.com/docker/libcontainer" + }, + "Container": "1c00417f3812a96d3ebc29e7fdee69f3d586d703ab89c8233fd4678d50707b39", + "ContainerConfig": { + "AttachStderr": false, + "AttachStdin": false, + "AttachStdout": false, "Cmd": [ "/bin/sh", "-c", - "#(nop) ADD fedora-20-dummy.tar.xz in /" + "#(nop) CMD [\"make\" \"direct-test\"]" ], - "Dns": null, - "DnsSearch": null, - "Image": 
"8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", - "Volumes": null, - "VolumesFrom": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "OnBuild": null, - "Context": null - }, - "docker_version": "0.6.3", - "author": "I P Babble \u003clsm5@ipbabble.com\u003e - ./buildcontainers.sh", - "config": { - "Hostname": "88807319f25e", "Domainname": "", - "User": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "PortSpecs": null, + "Entrypoint": [ + "/dind" + ], + "Env": [ + "PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + ], "ExposedPorts": null, - "Tty": false, + "Hostname": "242978536a06", + "Image": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a", + "Labels": {}, + "MacAddress": "", + "NetworkDisabled": false, + "OnBuild": [], "OpenStdin": false, + "PortSpecs": null, "StdinOnce": false, - "Env": [ - "HOME=/", - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Cmd": null, - "Dns": null, - "DnsSearch": null, - "Image": "8abc22bad04266308ff408ca61cb8f6f4244a59308f7efc64e54b08b496c58db", + "Tty": false, + "User": "", "Volumes": null, - "VolumesFrom": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "OnBuild": null, - "Context": null + "WorkingDir": "/go/src/github.com/docker/libcontainer" }, - "architecture": "x86_64", - "Size": 385520098 + "Created": "2015-04-07T05:34:39.079489206Z", + "DockerVersion": "1.5.0-dev", + "Id": "fc1203419df26ca82cad1dd04c709cb1b8a8a947bd5bcbdfbef8241a76f031db", + "Os": "linux", + "Parent": "c2b774c744afc5bea603b5e6c5218539e506649326de3ea0135182f299d0519a", + "Size": 0, + "VirtualSize": 613136466 }] # HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) +April 2014, originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and 
internal work. June 2014, updated by Sven Dowideit +April 2015, updated by Qiang Huang diff --git a/docs/man/docker-login.1.md b/docs/man/docker-login.1.md index f73df77ed8db4..87ad31b703c9c 100644 --- a/docs/man/docker-login.1.md +++ b/docs/man/docker-login.1.md @@ -13,7 +13,7 @@ docker-login - Register or log in to a Docker registry. [SERVER] # DESCRIPTION -Register or log in to a Docker Registry Service located on the specified +Register or log in to a Docker Registry located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command uses Docker's public registry located at `https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub. diff --git a/docs/man/docker-logout.1.md b/docs/man/docker-logout.1.md index d464f00fd1287..3726fd66ca02a 100644 --- a/docs/man/docker-logout.1.md +++ b/docs/man/docker-logout.1.md @@ -2,14 +2,14 @@ % Docker Community % JUNE 2014 # NAME -docker-logout - Log out from a Docker Registry Service. +docker-logout - Log out from a Docker Registry. # SYNOPSIS **docker logout** [SERVER] # DESCRIPTION -Log out of a Docker Registry Service located on the specified `SERVER`. You can +Log out of a Docker Registry located on the specified `SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you do not specify a `SERVER`, the command attempts to log you out of Docker's public registry located at `https://registry-1.docker.io/` by default. 
diff --git a/docs/man/docker-run.1.md b/docs/man/docker-run.1.md index 53d762cf61578..f2ce4b7774ae5 100644 --- a/docs/man/docker-run.1.md +++ b/docs/man/docker-run.1.md @@ -13,7 +13,9 @@ docker-run - Run a command in a new container [**--cap-drop**[=*[]*]] [**--cidfile**[=*CIDFILE*]] [**--cpuset-cpus**[=*CPUSET-CPUS*]] +[**--cpuset-mems**[=*CPUSET-MEMS*]] [**-d**|**--detach**[=*false*]] +[**--cpu-quota**[=*0*]] [**--device**[=*[]*]] [**--dns-search**[=*[]*]] [**--dns**[=*[]*]] @@ -134,6 +136,20 @@ division of CPU shares: **--cpuset-cpus**="" CPUs in which to allow execution (0-3, 0,1) +**--cpuset-mems**="" + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + + If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` +then processes in your Docker container will only use memory from the first +two memory nodes. + +**--cpu-quota**=0 + Limit the CPU CFS (Completely Fair Scheduler) quota + + Limit the container's CPU usage. By default, containers run with the full +CPU resource. This flag tells the kernel to restrict the container's CPU usage +to the quota you specify. + +**-d**, **--detach**=*true*|*false* Detached mode: run the container in the background and print the new container ID. The default is *false*. @@ -222,7 +238,7 @@ which interface and port to use. **--lxc-conf**=[] (lxc exec-driver only) Add custom lxc options --lxc-conf="lxc.cgroup.cpuset.cpus = 0,1" -**--log-driver**="|*json-file*|*syslog*|*none*" +**--log-driver**="|*json-file*|*syslog*|*journald*|*none*" Logging driver for container. Default is defined by daemon `--log-driver` flag. **Warning**: `docker logs` command works only for `json-file` logging driver. 
@@ -416,7 +432,7 @@ you’d like to connect instead, as in: ## Sharing IPC between containers -Using shm_server.c available here: http://www.cs.cf.ac.uk/Dave/C/node27.html +Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html Testing `--ipc=host` mode: diff --git a/docs/man/docker-stats.1.md b/docs/man/docker-stats.1.md index a1adc7ecbaa51..f6fc3f7f23f1f 100644 --- a/docs/man/docker-stats.1.md +++ b/docs/man/docker-stats.1.md @@ -23,6 +23,6 @@ Run **docker stats** with multiple containers. $ docker stats redis1 redis2 CONTAINER CPU % MEM USAGE/LIMIT MEM % NET I/O - redis1 0.07% 796 KiB/64 MiB 1.21% 788 B/648 B - redis2 0.07% 2.746 MiB/64 MiB 4.29% 1.266 KiB/648 B + redis1 0.07% 796 KB/64 MB 1.21% 788 B/648 B + redis2 0.07% 2.746 MB/64 MB 4.29% 1.266 KB/648 B diff --git a/docs/man/docker.1.md b/docs/man/docker.1.md index bcb9d25414297..4e7cafe4661d5 100644 --- a/docs/man/docker.1.md +++ b/docs/man/docker.1.md @@ -41,6 +41,12 @@ To see the man page for a command run **man docker **. **-d**, **--daemon**=*true*|*false* Enable daemon mode. Default is false. +**--default-gateway**="" + IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip) + +**--default-gateway-v6**="" + IPv6 address of the container default gateway + **--dns**="" Force Docker to use specific DNS servers @@ -89,8 +95,8 @@ unix://[/path/to/socket] to use. **--label**="[]" Set key=value labels to the daemon (displayed in `docker info`) -**--log-driver**="*json-file*|*syslog*|*none*" - Container's logging driver. Default is `default`. +**--log-driver**="*json-file*|*syslog*|*journald*|*none*" + Default driver for container logs. Default is `json-file`. **Warning**: `docker logs` command works only for `json-file` logging driver. **--mtu**=VALUE @@ -118,124 +124,165 @@ unix://[/path/to/socket] to use. **-v**, **--version**=*true*|*false* Print version information and quit. Default is false. 
+**--exec-opt**=[] + Set exec driver options. See EXEC DRIVER OPTIONS. + **--selinux-enabled**=*true*|*false* Enable selinux support. Default is false. SELinux does not presently support the BTRFS storage driver. # COMMANDS -**docker-attach(1)** +**attach** Attach to a running container + See **docker-attach(1)** for full documentation on the **attach** command. -**docker-build(1)** +**build** Build an image from a Dockerfile + See **docker-build(1)** for full documentation on the **build** command. -**docker-commit(1)** +**commit** Create a new image from a container's changes + See **docker-commit(1)** for full documentation on the **commit** command. -**docker-cp(1)** +**cp** Copy files/folders from a container's filesystem to the host + See **docker-cp(1)** for full documentation on the **cp** command. -**docker-create(1)** +**create** Create a new container + See **docker-create(1)** for full documentation on the **create** command. -**docker-diff(1)** +**diff** Inspect changes on a container's filesystem + See **docker-diff(1)** for full documentation on the **diff** command. -**docker-events(1)** +**events** Get real time events from the server + See **docker-events(1)** for full documentation on the **events** command. -**docker-exec(1)** +**exec** Run a command in a running container + See **docker-exec(1)** for full documentation on the **exec** command. -**docker-export(1)** +**export** Stream the contents of a container as a tar archive + See **docker-export(1)** for full documentation on the **export** command. -**docker-history(1)** +**history** Show the history of an image + See **docker-history(1)** for full documentation on the **history** command. -**docker-images(1)** +**images** List images + See **docker-images(1)** for full documentation on the **images** command. -**docker-import(1)** +**import** Create a new filesystem image from the contents of a tarball + See **docker-import(1)** for full documentation on the **import** command. 
-**docker-info(1)** +**info** Display system-wide information + See **docker-info(1)** for full documentation on the **info** command. -**docker-inspect(1)** +**inspect** Return low-level information on a container or image + See **docker-inspect(1)** for full documentation on the **inspect** command. -**docker-kill(1)** +**kill** Kill a running container (which includes the wrapper process and everything inside it) + See **docker-kill(1)** for full documentation on the **kill** command. -**docker-load(1)** +**load** Load an image from a tar archive + See **docker-load(1)** for full documentation on the **load** command. -**docker-login(1)** - Register or login to a Docker Registry Service +**login** + Register or login to a Docker Registry + See **docker-login(1)** for full documentation on the **login** command. -**docker-logout(1)** - Log the user out of a Docker Registry Service +**logout** + Log the user out of a Docker Registry + See **docker-logout(1)** for full documentation on the **logout** command. -**docker-logs(1)** +**logs** Fetch the logs of a container + See **docker-logs(1)** for full documentation on the **logs** command. -**docker-pause(1)** +**pause** Pause all processes within a container + See **docker-pause(1)** for full documentation on the **pause** command. -**docker-port(1)** +**port** Lookup the public-facing port which is NAT-ed to PRIVATE_PORT + See **docker-port(1)** for full documentation on the **port** command. -**docker-ps(1)** +**ps** List containers + See **docker-ps(1)** for full documentation on the **ps** command. -**docker-pull(1)** - Pull an image or a repository from a Docker Registry Service +**pull** + Pull an image or a repository from a Docker Registry + See **docker-pull(1)** for full documentation on the **pull** command. 
-**docker-push(1)** - Push an image or a repository to a Docker Registry Service +**push** + Push an image or a repository to a Docker Registry + See **docker-push(1)** for full documentation on the **push** command. -**docker-restart(1)** +**restart** Restart a running container + See **docker-restart(1)** for full documentation on the **restart** command. -**docker-rm(1)** +**rm** Remove one or more containers + See **docker-rm(1)** for full documentation on the **rm** command. -**docker-rmi(1)** +**rmi** Remove one or more images + See **docker-rmi(1)** for full documentation on the **rmi** command. -**docker-run(1)** +**run** Run a command in a new container + See **docker-run(1)** for full documentation on the **run** command. -**docker-save(1)** +**save** Save an image to a tar archive + See **docker-save(1)** for full documentation on the **save** command. -**docker-search(1)** +**search** Search for an image in the Docker index + See **docker-search(1)** for full documentation on the **search** command. -**docker-start(1)** +**start** Start a stopped container + See **docker-start(1)** for full documentation on the **start** command. -**docker-stats(1)** +**stats** Display a live stream of one or more containers' resource usage statistics + See **docker-stats(1)** for full documentation on the **stats** command. -**docker-stop(1)** +**stop** Stop a running container + See **docker-stop(1)** for full documentation on the **stop** command. -**docker-tag(1)** +**tag** Tag an image into a repository + See **docker-tag(1)** for full documentation on the **tag** command. -**docker-top(1)** +**top** Lookup the running processes of a container + See **docker-top(1)** for full documentation on the **top** command. -**docker-unpause(1)** +**unpause** Unpause all processes within a container + See **docker-unpause(1)** for full documentation on the **unpause** command. 
-**docker-version(1)** +**version** Show the Docker version information + See **docker-version(1)** for full documentation on the **version** command. -**docker-wait(1)** +**wait** Block until a container stops, then print its exit code + See **docker-wait(1)** for full documentation on the **wait** command. # STORAGE DRIVER OPTIONS @@ -313,6 +360,18 @@ for data and metadata: --storage-opt dm.metadatadev=/dev/vdc \ --storage-opt dm.basesize=20G +# EXEC DRIVER OPTIONS + +Use the **--exec-opt** flags to specify options to the exec-driver. The only +driver that accepts this flag is the *native* (libcontainer) driver. As a +result, you must also specify **-s=**native for this option to have effect. The +following is the only *native* option: + +#### native.cgroupdriver +Specifies the management of the container's `cgroups`. You can specify +`cgroupfs` or `systemd`. If you specify `systemd` and it is not available, the +system uses `cgroupfs`. + #### Client For specific client examples please see the man page for the specific Docker command. 
For example: diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index b5b30d72d32be..fb08e289e1a91 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -25,7 +25,7 @@ pages: # Introduction: - ['index.md', 'About', 'Docker'] -- ['release-notes.md', 'About', 'Release Notes'] +- ['release-notes.md', 'About', 'Release notes'] - ['introduction/index.md', '**HIDDEN**'] - ['introduction/understanding-docker.md', 'About', 'Understanding Docker'] @@ -34,6 +34,7 @@ pages: - ['installation/ubuntulinux.md', 'Installation', 'Ubuntu'] - ['installation/mac.md', 'Installation', 'Mac OS X'] - ['installation/windows.md', 'Installation', 'Microsoft Windows'] +- ['installation/testing-windows-docker-client.md', 'Installation', 'Building and testing the Windows Docker client'] - ['installation/amazon.md', 'Installation', 'Amazon EC2'] - ['installation/archlinux.md', 'Installation', 'Arch Linux'] - ['installation/binaries.md', 'Installation', 'Binaries'] @@ -54,11 +55,11 @@ pages: - ['compose/install.md', 'Installation', 'Docker Compose'] # User Guide: -- ['userguide/index.md', 'User Guide', 'The Docker User Guide' ] -- ['userguide/dockerhub.md', 'User Guide', 'Getting Started with Docker Hub' ] -- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing Applications' ] -- ['userguide/usingdocker.md', 'User Guide', 'Working with Containers' ] -- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker Images' ] +- ['userguide/index.md', 'User Guide', 'The Docker user guide' ] +- ['userguide/dockerhub.md', 'User Guide', 'Getting started with Docker Hub' ] +- ['userguide/dockerizing.md', 'User Guide', 'Dockerizing applications' ] +- ['userguide/usingdocker.md', 'User Guide', 'Working with containers' ] +- ['userguide/dockerimages.md', 'User Guide', 'Working with Docker images' ] - ['userguide/dockerlinks.md', 'User Guide', 'Linking containers together' ] - ['userguide/dockervolumes.md', 'User Guide', 'Managing data in containers' ] - ['userguide/labels-custom-metadata.md', 'User 
Guide', 'Apply custom metadata' ] @@ -66,6 +67,8 @@ pages: - ['userguide/level1.md', '**HIDDEN**' ] - ['userguide/level2.md', '**HIDDEN**' ] - ['compose/index.md', 'User Guide', 'Docker Compose' ] +- ['compose/production.md', 'User Guide', '    ▪  Use Compose in production' ] +- ['compose/extends.md', 'User Guide', '    ▪  Extend Compose services' ] - ['machine/index.md', 'User Guide', 'Docker Machine' ] - ['swarm/index.md', 'User Guide', 'Docker Swarm' ] @@ -74,12 +77,16 @@ pages: - ['docker-hub/accounts.md', 'Docker Hub', 'Accounts'] - ['docker-hub/repos.md', 'Docker Hub', 'Repositories'] - ['docker-hub/builds.md', 'Docker Hub', 'Automated Builds'] -- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repo Guidelines'] +- ['docker-hub/official_repos.md', 'Docker Hub', 'Official Repositories'] -# Docker Hub Enterprise -#- ['docker-hub-enterprise/index.md', '**HIDDEN**' ] -#- ['docker-hub-enterprise/install-config.md', 'Docker Hub Enterprise', 'Installation and Configuration' ] -#- ['docker-hub-enterprise/usage.md', 'Docker Hub Enterprise', 'User Guide' ] +# Docker Hub Enterprise: +- ['docker-hub-enterprise/index.md', 'Docker Hub Enterprise', 'Overview' ] +- ['docker-hub-enterprise/quick-start.md', 'Docker Hub Enterprise', 'Quick Start: Basic Workflow' ] +- ['docker-hub-enterprise/userguide.md', 'Docker Hub Enterprise', 'User Guide' ] +- ['docker-hub-enterprise/adminguide.md', 'Docker Hub Enterprise', 'Admin Guide' ] +- ['docker-hub-enterprise/install.md', 'Docker Hub Enterprise', '  Installation' ] +- ['docker-hub-enterprise/configuration.md', 'Docker Hub Enterprise', '  Configuration options' ] +- ['docker-hub-enterprise/support.md', 'Docker Hub Enterprise', 'Support' ] # Examples: - ['examples/index.md', '**HIDDEN**'] @@ -107,6 +114,7 @@ pages: - ['articles/dockerfile_best-practices.md', 'Articles', 'Best practices for writing Dockerfiles'] - ['articles/certificates.md', 'Articles', 'Using certificates for repository client verification'] - 
['articles/using_supervisord.md', 'Articles', 'Using Supervisor'] +- ['articles/configuring.md', 'Articles', 'Configuring Docker'] - ['articles/cfengine_process_management.md', 'Articles', 'Process management with CFEngine'] - ['articles/puppet.md', 'Articles', 'Using Puppet'] - ['articles/chef.md', 'Articles', 'Using Chef'] @@ -122,8 +130,7 @@ pages: - ['reference/commandline/cli.md', 'Reference', 'Docker command line'] - ['reference/builder.md', 'Reference', 'Dockerfile'] - ['faq.md', 'Reference', 'FAQ'] -- ['reference/glossary.md', 'Reference', 'Glossary'] -- ['reference/run.md', 'Reference', 'Run Reference'] +- ['reference/run.md', 'Reference', 'Run reference'] - ['compose/cli.md', 'Reference', 'Compose command line'] - ['compose/yml.md', 'Reference', 'Compose yml'] - ['compose/env.md', 'Reference', 'Compose ENV variables'] @@ -133,16 +140,24 @@ pages: - ['swarm/scheduler/filter.md', 'Reference', 'Swarm filters'] - ['swarm/API.md', 'Reference', 'Swarm API'] - ['reference/api/index.md', '**HIDDEN**'] +- ['registry/index.md', 'Reference', 'Docker Registry 2.0'] +- ['registry/deploying.md', 'Reference', '    ▪  Deploy a registry' ] +- ['registry/configuration.md', 'Reference', '    ▪  Configure a registry' ] +- ['registry/storagedrivers.md', 'Reference', '    ▪  Storage driver model' ] +- ['registry/notifications.md', 'Reference', '    ▪  Work with notifications' ] +- ['registry/spec/api.md', 'Reference', '    ▪  Registry Service API v2' ] +- ['registry/spec/json.md', 'Reference', '    ▪  JSON format' ] +- ['registry/spec/auth/token.md', 'Reference', '    ▪  Authenticate via central service' ] +- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry 1.0'] +- ['reference/api/registry_api.md', 'Reference', '    ▪ Docker Registry API v1'] +- ['reference/api/registry_api_client_libraries.md', 'Reference', '    ▪ Docker Registry 1.0 API client libraries'] +#- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0'] - 
['reference/api/docker-io_api.md', 'Reference', 'Docker Hub API'] -- ['reference/api/registry_api.md', 'Reference', 'Docker Registry API'] -- ['reference/api/registry_api_client_libraries.md', 'Reference', 'Docker Registry API Client Libraries'] -- ['reference/api/hub_registry_spec.md', 'Reference', 'Docker Hub and Registry Spec'] #- ['reference/image-spec-v1.md', 'Reference', 'Docker Image Specification v1.0.0'] - ['reference/api/docker_remote_api.md', 'Reference', 'Docker Remote API'] -- ['reference/api/docker_remote_api_v1.19.md', 'Reference', 'Docker Remote API v1.19'] - ['reference/api/docker_remote_api_v1.18.md', 'Reference', 'Docker Remote API v1.18'] - ['reference/api/docker_remote_api_v1.17.md', 'Reference', 'Docker Remote API v1.17'] -- ['reference/api/docker_remote_api_v1.16.md', '**HIDDEN**'] +- ['reference/api/docker_remote_api_v1.16.md', 'Reference', 'Docker Remote API v1.16'] - ['reference/api/docker_remote_api_v1.15.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.14.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.13.md', '**HIDDEN**'] @@ -159,8 +174,14 @@ pages: - ['reference/api/docker_remote_api_v1.2.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.1.md', '**HIDDEN**'] - ['reference/api/docker_remote_api_v1.0.md', '**HIDDEN**'] -- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries'] -- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub Accounts API'] +- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API client libraries'] +- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker Hub accounts API'] + +# Hidden registry files +- ['registry/storage-drivers/azure.md', '**HIDDEN**' ] +- ['registry/storage-drivers/filesystem.md', '**HIDDEN**' ] +- ['registry/storage-drivers/inmemory.md', '**HIDDEN**' ] +- ['registry/storage-drivers/s3.md', '**HIDDEN**' ] - ['jsearch.md', '**HIDDEN**'] @@ -177,18 +198,19 @@ pages: # 
Project: - ['project/index.md', '**HIDDEN**'] -- ['project/who-written-for.md', 'Contributor Guide', 'README first'] -- ['project/software-required.md', 'Contributor Guide', 'Get required software'] -- ['project/set-up-git.md', 'Contributor Guide', 'Configure Git for contributing'] -- ['project/set-up-dev-env.md', 'Contributor Guide', 'Work with a development container'] -- ['project/test-and-docs.md', 'Contributor Guide', 'Run tests and test documentation'] -- ['project/make-a-contribution.md', 'Contributor Guide', 'Understand contribution workflow'] -- ['project/find-an-issue.md', 'Contributor Guide', 'Find an issue'] -- ['project/work-issue.md', 'Contributor Guide', 'Work on an issue'] -- ['project/create-pr.md', 'Contributor Guide', 'Create a pull request'] -- ['project/review-pr.md', 'Contributor Guide', 'Participate in the PR review'] -- ['project/advanced-contributing.md', 'Contributor Guide', 'Advanced contributing'] -- ['project/get-help.md', 'Contributor Guide', 'Where to get help'] -- ['project/coding-style.md', 'Contributor Guide', 'Coding style guide'] -- ['project/doc-style.md', 'Contributor Guide', 'Documentation style guide'] +- ['project/who-written-for.md', 'Contributor', 'README first'] +- ['project/software-required.md', 'Contributor', 'Get required software for Linux or OS X'] +- ['project/software-req-win.md', 'Contributor', 'Get required software for Windows'] +- ['project/set-up-git.md', 'Contributor', 'Configure Git for contributing'] +- ['project/set-up-dev-env.md', 'Contributor', 'Work with a development container'] +- ['project/test-and-docs.md', 'Contributor', 'Run tests and test documentation'] +- ['project/make-a-contribution.md', 'Contributor', 'Understand contribution workflow'] +- ['project/find-an-issue.md', 'Contributor', 'Find an issue'] +- ['project/work-issue.md', 'Contributor', 'Work on an issue'] +- ['project/create-pr.md', 'Contributor', 'Create a pull request'] +- ['project/review-pr.md', 'Contributor', 'Participate in the 
PR review'] +- ['project/advanced-contributing.md', 'Contributor', 'Advanced contributing'] +- ['project/get-help.md', 'Contributor', 'Where to get help'] +- ['project/coding-style.md', 'Contributor', 'Coding style guide'] +- ['project/doc-style.md', 'Contributor', 'Documentation style guide'] diff --git a/docs/release.sh b/docs/release.sh index 09a85016c145e..d01bc0293c396 100755 --- a/docs/release.sh +++ b/docs/release.sh @@ -22,10 +22,17 @@ EOF } create_robots_txt() { - cat > ./sources/robots.txt <<'EOF' -User-agent: * -Disallow: / -EOF + if [ "$AWS_S3_BUCKET" == "docs.docker.com" ]; then + cat > ./sources/robots.txt <<-'EOF' + User-agent: * + Allow: / + EOF + else + cat > ./sources/robots.txt <<-'EOF' + User-agent: * + Disallow: / + EOF + fi } setup_s3() { diff --git a/docs/s3_website.json b/docs/s3_website.json index 1142fc0d87fed..b2479bc338b29 100644 --- a/docs/s3_website.json +++ b/docs/s3_website.json @@ -42,7 +42,9 @@ { "Condition": { "KeyPrefixEquals": "installation/openSUSE/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "installation/SUSE/" } }, { "Condition": { "KeyPrefixEquals": "contributing/contributing/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/who-written-for/" } }, { "Condition": { "KeyPrefixEquals": "contributing/devenvironment/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/set-up-prereqs/" } }, - { "Condition": { "KeyPrefixEquals": "contributing/docs_style-guide/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/doc-style/" } } + { "Condition": { "KeyPrefixEquals": "contributing/docs_style-guide/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "project/doc-style/" } }, + { "Condition": { "KeyPrefixEquals": "registry/overview/" }, "Redirect": { "HostName": "$BUCKET", "ReplaceKeyPrefixWith": "registry/" } } + ] } diff --git a/docs/sources/articles/ambassador_pattern_linking.md 
b/docs/sources/articles/ambassador_pattern_linking.md index 755fa4dc9c468..2f168262a31aa 100644 --- a/docs/sources/articles/ambassador_pattern_linking.md +++ b/docs/sources/articles/ambassador_pattern_linking.md @@ -1,8 +1,8 @@ -page_title: Link via an Ambassador Container +page_title: Link via an ambassador container page_description: Using the Ambassador pattern to abstract (network) services page_keywords: Examples, Usage, links, docker, documentation, examples, names, name, container naming -# Link via an Ambassador Container +# Link via an ambassador container ## Introduction @@ -30,7 +30,7 @@ different docker host from the consumer. Using the `svendowideit/ambassador` container, the link wiring is controlled entirely from the `docker run` parameters. -## Two host Example +## Two host example Start actual Redis server on one Docker host diff --git a/docs/sources/articles/b2d_volume_resize.md b/docs/sources/articles/b2d_volume_resize.md index 1b39b49eda461..53c8590955724 100644 --- a/docs/sources/articles/b2d_volume_resize.md +++ b/docs/sources/articles/b2d_volume_resize.md @@ -1,5 +1,5 @@ -page_title: Resizing a Boot2Docker Volume -page_description: Resizing a Boot2Docker Volume in VirtualBox with GParted +page_title: Resizing a Boot2Docker volume +page_description: Resizing a Boot2Docker volume in VirtualBox with GParted page_keywords: boot2docker, volume, virtualbox # Getting “no space left on device” errors with Boot2Docker? @@ -60,7 +60,7 @@ You might need to create the bus before you can add the ISO. ## 5. Add the new VDI image In the settings for the Boot2Docker image in VirtualBox, remove the VMDK image -from the SATA contoller and add the VDI image. +from the SATA controller and add the VDI image. 
diff --git a/docs/sources/articles/baseimages.md b/docs/sources/articles/baseimages.md index 701f432ffb111..a1a7665b704e8 100644 --- a/docs/sources/articles/baseimages.md +++ b/docs/sources/articles/baseimages.md @@ -1,8 +1,8 @@ -page_title: Create a Base Image +page_title: Create a base image page_description: How to create base images page_keywords: Examples, Usage, base image, docker, documentation, examples -# Create a Base Image +# Create a base image So you want to create your own [*Base Image*]( /terms/image/#base-image)? Great! @@ -65,4 +65,4 @@ There are lots more resources available to help you write your 'Dockerfile`. * There's a [complete guide to all the instructions](/reference/builder/) available for use in a `Dockerfile` in the reference section. * To help you write a clear, readable, maintainable `Dockerfile`, we've also written a [`Dockerfile` Best Practices guide](/articles/dockerfile_best-practices). -* If you're working on an Official Repo, be sure to check out the [Official Repo Guidelines](/docker-hub/official_repos/). +* If your goal is to create a new Official Repository, be sure to read up on Docker's [Official Repositories](/docker-hub/official_repos/). diff --git a/docs/sources/articles/basics.md b/docs/sources/articles/basics.md index 94264ece64e88..7d7c154091b7c 100644 --- a/docs/sources/articles/basics.md +++ b/docs/sources/articles/basics.md @@ -172,7 +172,7 @@ will be stored (as a diff). See which images you already have using the # Commit your container to a new named image $ docker commit - # List your containers + # List your images $ docker images You now have an image state from which you can create new instances. 
diff --git a/docs/sources/articles/cfengine_process_management.md b/docs/sources/articles/cfengine_process_management.md index a9441a6d351e2..b0437268b0a5f 100644 --- a/docs/sources/articles/cfengine_process_management.md +++ b/docs/sources/articles/cfengine_process_management.md @@ -1,8 +1,8 @@ -page_title: Process Management with CFEngine +page_title: Process management with CFEngine page_description: Managing containerized processes with CFEngine page_keywords: cfengine, process, management, usage, docker, documentation -# Process Management with CFEngine +# Process management with CFEngine Create Docker containers with managed processes. diff --git a/docs/sources/articles/chef.md b/docs/sources/articles/chef.md index 8fe0504ffa556..84ccdffb2be99 100644 --- a/docs/sources/articles/chef.md +++ b/docs/sources/articles/chef.md @@ -1,4 +1,4 @@ -page_title: Chef Usage +page_title: Using Chef page_description: Installation and using Docker via Chef page_keywords: chef, installation, usage, docker, documentation diff --git a/docs/sources/articles/configuring.md b/docs/sources/articles/configuring.md new file mode 100644 index 0000000000000..35d0eb8e5823a --- /dev/null +++ b/docs/sources/articles/configuring.md @@ -0,0 +1,98 @@ +page_title: Configuring Docker +page_description: Configuring the Docker daemon on various distributions +page_keywords: docker, daemon, configuration + +# Configuring Docker on various distributions + +After successfully installing Docker, the `docker` daemon runs with its default +configuration. You can configure the `docker` daemon by passing configuration +flags to it directly when you start it. + +In a production environment, system administrators typically configure the +`docker` daemon to start and stop according to an organization's requirements. In most +cases, the system administrator configures a process manager such as `SysVinit`, `Upstart`, +or `systemd` to manage the `docker` daemon's start and stop. 
+ +Some of the daemon's options are: + +| Flag | Description | +|-----------------------|-----------------------------------------------------------| +| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. | +| `-H`, `--host=[]` | Daemon socket(s) to connect to. | +| `--tls=false` | Enable or disable TLS. By default, this is false. | +
+The command line reference has the [complete list of daemon flags](/reference/commandline/cli/#daemon). + +## Direct Configuration + +If you're running the `docker` daemon directly by running `docker -d` instead of using a process manager, +you can append the config options to the run command directly. + + +Here is an example of running the `docker` daemon with config options: + + docker -d -D --tls=false -H tcp://0.0.0.0:2375 + +These options: + +- Enable `-D` (debug) mode +- Set `tls` to false +- Listen for connections on `tcp://0.0.0.0:2375` + + +## Ubuntu + +After successfully [installing Docker for Ubuntu](/installation/ubuntulinux/), you can check the +running status using Upstart in this way: + + $ sudo status docker + docker start/running, process 989 + +You can start/stop/restart `docker` using + + $ sudo start docker + + $ sudo stop docker + + $ sudo restart docker + + +### Configuring Docker + +You configure the `docker` daemon in the `/etc/default/docker` file on your +system. You do this by specifying values in a `DOCKER_OPTS` variable.
+To configure Docker options: + +1. Log into your system as a user with `sudo` or `root` privileges. + +2. If you don't have one, create the `/etc/default/docker` file in your system. + + Depending on how you installed Docker, you may already have this file. + +3. Open the file with your favorite editor. + + $ sudo vi /etc/default/docker + +4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the +`docker` daemon's run command.
+ + ``` + DOCKER_OPTS=" --dns 8.8.8.8 --dns 8.8.4.4 -D --tls=false -H tcp://0.0.0.0:2375 " + ``` + +These options: + +- Set `dns` server for all containers +- Enable `-D` (debug) mode +- Set `tls` to false +- Listen for connections on `tcp://0.0.0.0:2375` + +5. Save and close the file. + +6. Restart the `docker` daemon. + + $ sudo restart docker + +7. Verify that the `docker` daemon is running as specified with the `ps` command. + + $ ps aux | grep docker | grep -v grep diff --git a/docs/sources/articles/dockerfile_best-practices.md b/docs/sources/articles/dockerfile_best-practices.md index 83a77fc74d8a1..2604b22453bdc 100644 --- a/docs/sources/articles/dockerfile_best-practices.md +++ b/docs/sources/articles/dockerfile_best-practices.md @@ -1,4 +1,4 @@ -page_title: Best Practices for Writing Dockerfiles +page_title: Best practices for writing Dockerfiles page_description: Hints, tips and guidelines for writing clean, reliable Dockerfiles page_keywords: Examples, Usage, base image, docker, documentation, dockerfile, best practices, hub, official repo @@ -32,13 +32,14 @@ ephemeral as possible. By “ephemeral,” we mean that it can be stopped and destroyed and a new one built and put in place with an absolute minimum of set-up and configuration. -### Use [a .dockerignore file](https://docs.docker.com/reference/builder/#the-dockerignore-file) +### Use a .dockerignore file -For faster uploading and efficiency during `docker build`, you should use -a `.dockerignore` file to exclude files or directories from the build -context and final image. For example, unless`.git` is needed by your build -process or scripts, you should add it to `.dockerignore`, which can save many -megabytes worth of upload time. +In most cases, it's best to put each Dockerfile in an empty directory. Then, +add to that directory only the files needed for building the Dockerfile.
To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to that directory as well. This file supports +exclusion patterns similar to `.gitignore` files. For information on creating one, +see the [.dockerignore file](../../reference/builder/#dockerignore-file). ### Avoid installing unnecessary packages @@ -419,16 +420,16 @@ fail catastrophically if the new build's context is missing the resource being added. Adding a separate tag, as recommended above, will help mitigate this by allowing the `Dockerfile` author to make a choice. -## Examples For Official Repositories +## Examples for Official Repositories -These Official Repos have exemplary `Dockerfile`s: +These Official Repositories have exemplary `Dockerfile`s: * [Go](https://registry.hub.docker.com/_/golang/) * [Perl](https://registry.hub.docker.com/_/perl/) * [Hy](https://registry.hub.docker.com/_/hylang/) * [Rails](https://registry.hub.docker.com/_/rails) -## Additional Resources: +## Additional resources: * [Dockerfile Reference](https://docs.docker.com/reference/builder/#onbuild) * [More about Base Images](https://docs.docker.com/articles/baseimages/) diff --git a/docs/sources/articles/host_integration.md b/docs/sources/articles/host_integration.md index cbcb21a357bd0..e3451764bb056 100644 --- a/docs/sources/articles/host_integration.md +++ b/docs/sources/articles/host_integration.md @@ -1,8 +1,8 @@ -page_title: Automatically Start Containers +page_title: Automatically start containers page_description: How to generate scripts for upstart, systemd, etc. page_keywords: systemd, upstart, supervisor, docker, documentation, host integration -# Automatically Start Containers +# Automatically start containers As of Docker 1.2, [restart policies](/reference/commandline/cli/#restart-policies) are the @@ -18,7 +18,7 @@ that depend on Docker containers), you can use a process manager like [supervisor](http://supervisord.org/) instead. 
-## Using a Process Manager +## Using a process manager Docker does not set any restart policies by default, but be aware that they will conflict with most process managers. So don't set restart policies if you are diff --git a/docs/sources/articles/https.md b/docs/sources/articles/https.md index 94d9ca3f22374..d6689bbf1364e 100644 --- a/docs/sources/articles/https.md +++ b/docs/sources/articles/https.md @@ -1,8 +1,8 @@ -page_title: Protecting the Docker daemon Socket with HTTPS +page_title: Protecting the Docker daemon socket with HTTPS page_description: How to setup and run Docker with HTTPS page_keywords: docker, docs, article, example, https, daemon, tls, ca, certificate -# Protecting the Docker daemon Socket with HTTPS +# Protecting the Docker daemon socket with HTTPS By default, Docker runs via a non-networked Unix socket. It can also optionally communicate using a HTTP socket. @@ -193,7 +193,7 @@ location using the environment variable `DOCKER_CERT_PATH`. $ export DOCKER_CERT_PATH=~/.docker/zone1/ $ docker --tlsverify ps -### Connecting to the Secure Docker port using `curl` +### Connecting to the secure Docker port using `curl` To use `curl` to make test API requests, you need to use three extra command line flags: diff --git a/docs/sources/articles/networking.md b/docs/sources/articles/networking.md index 46a907f7e27bc..823b450c75656 100644 --- a/docs/sources/articles/networking.md +++ b/docs/sources/articles/networking.md @@ -1,8 +1,8 @@ -page_title: Network Configuration +page_title: Network configuration page_description: Docker networking page_keywords: network, networking, bridge, docker, documentation -# Network Configuration +# Network configuration ## TL;DR @@ -41,7 +41,7 @@ can use Docker options and — in advanced cases — raw Linux networking commands to tweak, supplement, or entirely replace Docker's default networking configuration. 
-## Quick Guide to the Options +## Quick guide to the options Here is a quick list of the networking-related Docker command-line options, in case it helps you find the section below that you are @@ -56,6 +56,12 @@ server when it starts up, and cannot be changed once it is running: * `--bip=CIDR` — see [Customizing docker0](#docker0) + * `--default-gateway=IP_ADDRESS` — see + [How Docker networks a container](#container-networking) + + * `--default-gateway-v6=IP_ADDRESS` — see + [IPv6](#ipv6) + * `--fixed-cidr` — see [Customizing docker0](#docker0) @@ -499,7 +505,9 @@ want to configure `eth0` via Router Advertisements you should set: ![](/article-img/ipv6_basic_host_config.svg) Every new container will get an IPv6 address from the defined subnet. Further -a default route will be added via the gateway `fe80::1` on `eth0`: +a default route will be added on `eth0` in the container via the address +specified by the daemon option `--default-gateway-v6` if present, otherwise +via `fe80::1`: docker run -it ubuntu bash -c "ip -6 addr show dev eth0; ip -6 route show" @@ -568,7 +576,7 @@ As soon as the router wants to send an IPv6 packet to the first container it will transmit a neighbor solicitation request, asking, who has `2001:db8::c009`? But it will get no answer because noone on this subnet has this address. The container with this address is hidden behind the Docker host. -The Docker host has to listen to neighbor solication requests for the container +The Docker host has to listen to neighbor solicitation requests for the container address and send a response that itself is the device that is responsible for the address. This is done by a Kernel feature called `NDP Proxy`. You can enable it by executing @@ -593,9 +601,9 @@ You have to execute the `ip -6 neigh add proxy ...` command for every IPv6 address in your Docker subnet. Unfortunately there is no functionality for adding a whole subnet by executing one command. 
-### Docker IPv6 Cluster +### Docker IPv6 cluster -#### Switched Network Environment +#### Switched network environment Using routable IPv6 addresses allows you to realize communication between containers on different hosts. Let's have a look at a simple Docker IPv6 cluster example: @@ -641,7 +649,7 @@ the Docker subnet on the host, the container IP addresses and the routes on the containers. The configuration above the line is up to the user and can be adapted to the individual environment. -#### Routed Network Environment +#### Routed network environment In a routed network environment you replace the layer 2 switch with a layer 3 router. Now the hosts just have to know their default gateway (the router) and @@ -865,12 +873,13 @@ The steps with which Docker configures a container are: parameter or generate a random one. 5. Give the container's `eth0` a new IP address from within the - bridge's range of network addresses, and set its default route to - the IP address that the Docker host owns on the bridge. The MAC - address is generated from the IP address unless otherwise specified. - This prevents ARP cache invalidation problems, when a new container - comes up with an IP used in the past by another container with another - MAC. + bridge's range of network addresses. The default route is set to the + IP address passed to the Docker daemon using the `--default-gateway` + option if specified, otherwise to the IP address that the Docker host + owns on the bridge. The MAC address is generated from the IP address + unless otherwise specified. This prevents ARP cache invalidation + problems, when a new container comes up with an IP used in the past by + another container with another MAC. With these steps complete, the container now possesses an `eth0` (virtual) network card and will find itself able to communicate with @@ -984,7 +993,7 @@ of the right to configure their own networks. 
Using `ip netns exec` is what let us finish up the configuration without having to take the dangerous step of running the container itself with `--privileged=true`. -## Tools and Examples +## Tools and examples Before diving into the following sections on custom network topologies, you might be interested in glancing at a few external tools or examples diff --git a/docs/sources/articles/puppet.md b/docs/sources/articles/puppet.md index 705285fbaf852..a1b3d273a4e33 100644 --- a/docs/sources/articles/puppet.md +++ b/docs/sources/articles/puppet.md @@ -1,5 +1,5 @@ -page_title: Puppet Usage -page_description: Installating and using Puppet +page_title: Using Puppet +page_description: Installing and using Puppet page_keywords: puppet, installation, usage, docker, documentation # Using Puppet diff --git a/docs/sources/articles/runmetrics.md b/docs/sources/articles/runmetrics.md index 3276409697426..a887d4369a6a8 100644 --- a/docs/sources/articles/runmetrics.md +++ b/docs/sources/articles/runmetrics.md @@ -1,8 +1,8 @@ -page_title: Runtime Metrics +page_title: Runtime metrics page_description: Measure the behavior of running containers page_keywords: docker, metrics, CPU, memory, disk, IO, run, runtime -# Runtime Metrics +# Runtime metrics Linux Containers rely on [control groups]( https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt) @@ -11,7 +11,7 @@ CPU, memory, and block I/O usage. You can access those metrics and obtain network usage metrics as well. This is relevant for "pure" LXC containers, as well as for Docker containers. -## Control Groups +## Control groups Control groups are exposed through a pseudo-filesystem. In recent distros, you should find this filesystem under `/sys/fs/cgroup`. 
Under @@ -28,7 +28,7 @@ To figure out where your control groups are mounted, you can run: $ grep cgroup /proc/mounts -## Enumerating Cgroups +## Enumerating cgroups You can look into `/proc/cgroups` to see the different control group subsystems known to the system, the hierarchy they belong to, and how many groups they contain. @@ -39,7 +39,7 @@ the hierarchy mountpoint; e.g., `/` means “this process has not been assigned a particular group”, while `/lxc/pumpkin` means that the process is likely to be a member of a container named `pumpkin`. -## Finding the Cgroup for a Given Container +## Finding the cgroup for a given container For each container, one cgroup will be created in each hierarchy. On older systems with older versions of the LXC userland tools, the name of @@ -55,12 +55,12 @@ look it up with `docker inspect` or `docker ps --no-trunc`. Putting everything together to look at the memory metrics for a Docker container, take a look at `/sys/fs/cgroup/memory/lxc//`. -## Metrics from Cgroups: Memory, CPU, Block IO +## Metrics from cgroups: memory, CPU, block I/O For each subsystem (memory, CPU, and block I/O), you will find one or more pseudo-files containing statistics. -### Memory Metrics: `memory.stat` +### Memory metrics: `memory.stat` Memory metrics are found in the "memory" cgroup. Note that the memory control group adds a little overhead, because it does very fine-grained @@ -262,7 +262,7 @@ relevant ones: not perform more I/O, its queue size can increase just because the device load increases because of other devices. -## Network Metrics +## Network metrics Network metrics are not exposed directly by control groups. 
There is a good explanation for that: network interfaces exist within the context diff --git a/docs/sources/articles/security.md b/docs/sources/articles/security.md index a26f79cf9bf9e..42d15e88c0452 100644 --- a/docs/sources/articles/security.md +++ b/docs/sources/articles/security.md @@ -1,8 +1,8 @@ -page_title: Docker Security +page_title: Docker security page_description: Review of the Docker Daemon attack surface page_keywords: Docker, Docker documentation, security -# Docker Security +# Docker security There are three major areas to consider when reviewing Docker security: @@ -14,7 +14,7 @@ There are three major areas to consider when reviewing Docker security: - the "hardening" security features of the kernel and how they interact with containers. -## Kernel Namespaces +## Kernel namespaces Docker containers are very similar to LXC containers, and they have similar security features. When you start a container with `docker @@ -53,7 +53,7 @@ http://en.wikipedia.org/wiki/OpenVZ) in such a way that they could be merged within the mainstream kernel. And OpenVZ was initially released in 2005, so both the design and the implementation are pretty mature. -## Control Groups +## Control groups Control Groups are another key component of Linux Containers. They implement resource accounting and limiting. They provide many @@ -72,7 +72,7 @@ when some applications start to misbehave. Control Groups have been around for a while as well: the code was started in 2006, and initially merged in kernel 2.6.24. -## Docker Daemon Attack Surface +## Docker daemon attack surface Running containers (and applications) with Docker implies running the Docker daemon. This daemon currently requires `root` privileges, and you @@ -132,7 +132,7 @@ containers controlled by Docker. Of course, it is fine to keep your favorite admin tools (probably at least an SSH server), as well as existing monitoring/supervision processes (e.g., NRPE, collectd, etc). 
-## Linux Kernel Capabilities +## Linux kernel capabilities By default, Docker starts containers with a restricted set of capabilities. What does that mean? @@ -206,7 +206,7 @@ capability removal, or less secure through the addition of capabilities. The best practice for users would be to remove all capabilities except those explicitly required for their processes. -## Other Kernel Security Features +## Other kernel security features Capabilities are just one of the many security features provided by modern Linux kernels. It is also possible to leverage existing, @@ -249,7 +249,7 @@ may still be utilized by Docker containers on supported kernels, by directly using the clone syscall, or utilizing the 'unshare' utility. Using this, some users may find it possible to drop more capabilities from their process as user namespaces provide -an artifical capabilities set. Likewise, however, this artifical +an artificial capabilities set. Likewise, however, this artificial capabilities set may require use of 'capsh' to restrict the user-namespace capabilities set when using 'unshare'. diff --git a/docs/sources/articles/systemd.md b/docs/sources/articles/systemd.md index fddd146b0702b..18631ee2cf709 100644 --- a/docs/sources/articles/systemd.md +++ b/docs/sources/articles/systemd.md @@ -1,8 +1,8 @@ -page_title: Controlling and configuring Docker using Systemd -page_description: Controlling and configuring Docker using Systemd +page_title: Controlling and configuring Docker using systemd +page_description: Controlling and configuring Docker using systemd page_keywords: docker, daemon, systemd, configuration -# Controlling and configuring Docker using Systemd +# Controlling and configuring Docker using systemd Many Linux distributions use systemd to start the Docker daemon. This document shows a few examples of how to customise Docker's settings. 
@@ -30,8 +30,8 @@ If the `docker.service` file is set to use an `EnvironmentFile` (often pointing to `/etc/sysconfig/docker`) then you can modify the referenced file. -Or, you may need to edit the `docker.service` file, which can be in `/usr/lib/systemd/system` -or `/etc/systemd/service`. +Or, you may need to edit the `docker.service` file, which can be in +`/usr/lib/systemd/system`, `/etc/systemd/service`, or `/lib/systemd/system`. ### Runtime directory and storage driver @@ -64,7 +64,7 @@ setting `OPTIONS`: You can also set other environment variables in this file, for example, the `HTTP_PROXY` environment variables described below. -### HTTP Proxy +### HTTP proxy This example overrides the default `docker.service` file. diff --git a/docs/sources/docker-hub-enterprise/admin-metrics.png b/docs/sources/docker-hub-enterprise/admin-metrics.png new file mode 100644 index 0000000000000..21a8f74a7cab9 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/admin-metrics.png differ diff --git a/docs/sources/docker-hub-enterprise/admin-settings-http.png b/docs/sources/docker-hub-enterprise/admin-settings-http.png new file mode 100644 index 0000000000000..77df71f7ea9b2 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/admin-settings-http.png differ diff --git a/docs/sources/docker-hub-enterprise/admin.png b/docs/sources/docker-hub-enterprise/admin.png new file mode 100644 index 0000000000000..54826e5658bbb Binary files /dev/null and b/docs/sources/docker-hub-enterprise/admin.png differ diff --git a/docs/sources/docker-hub-enterprise/adminguide.md b/docs/sources/docker-hub-enterprise/adminguide.md new file mode 100644 index 0000000000000..d471041675582 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/adminguide.md @@ -0,0 +1,103 @@ +page_title: Docker Hub Enterprise: Admin guide +page_description: Documentation describing administration of Docker Hub Enterprise +page_keywords: docker, documentation, about, technology, hub, enterprise + +# Docker 
Hub Enterprise Administrator's Guide + +This guide covers tasks and functions an administrator of Docker Hub Enterprise +(DHE) will need to know about, such as reporting, logging, system management, +performance metrics, etc. +For tasks DHE users need to accomplish, such as using DHE to push and pull +images, please visit the [User's Guide](./userguide). + +## Reporting + +### System Health + +![System Health page](../assets/admin-metrics.png) + +The "System Health" tab displays resource utilization metrics for the DHE host +as well as for each of its contained services. The CPU and RAM usage meters at +the top indicate overall resource usage for the host, while detailed time-series +charts are provided below for each service. You can mouse-over the charts or +meters to see detailed data points. + +Clicking on a service name (i.e., "load_balancer", "admin_server", etc.) will +display the network, CPU, and memory (RAM) utilization data for the specified +service. See below for a +[detailed explanation of the available services](#services). + +### Logs + +![System Logs page](../assets/admin-logs.png) + +Click the "Logs" tab to view all logs related to your DHE instance. You will see +log sections on this page for each service in your DHE instance. Older or newer +logs can be loaded by scrolling up or down. See below for a +[detailed explanation of the available services](#services). + +DHE's log files can be found on the host in `/usr/local/etc/dhe/logs/`. The +files are limited to a maximum size of 64mb. They are rotated every two weeks, +when the aggregator sends logs to the collection server, or they are rotated if +a logfile would exceed 64mb without rotation. Log files are named `-`, where the "component name" is the service it +provides (`manager`, `admin-server`, etc.). + +### Usage statistics and crash reports + +During normal use, DHE generates usage statistics and crash reports. This +information is collected by Docker, Inc. 
to help us prioritize features, fix +bugs, and improve our products. Specifically, Docker, Inc. collects the +following information: + +* Error logs +* Crash logs + +## Emergency access to the DHE admin web interface + +If your authenticated or public access to the DHE web interface has stopped +working, but your DHE admin container is still running, you can add an +[ambassador container](https://docs.docker.com/articles/ambassador_pattern_linking/) +to get temporary unsecure access to it by running: + + $ docker run --rm -it --link docker_hub_enterprise_admin_server:admin -p 9999:80 svendowideit/ambassador + +> **Note:** This guide assumes you can run Docker commands from a machine where +> you are a member of the `docker` group, or have root privileges. Otherwise, +> you may need to add `sudo` to the example command above. + +This will give you access on port `9999` on your DHE server - `http://:9999/admin/`. + +## Services + +DHE runs several Docker services which are essential to its reliability and +usability. The following services are included; you can see their details by +running queries on the [System Health](#system-health) and [Logs](#logs) pages: + +* `admin_server`: Used for displaying system health, performing upgrades, +configuring settings, and viewing logs. +* `load_balancer`: Used for maintaining high availability by distributing load +to each image storage service (`image_storage_X`). +* `log_aggregator`: A microservice used for aggregating logs from each of the +other services. Handles log persistence and rotation on disk. +* `image_storage_X`: Stores Docker images using the [Docker Registry HTTP API V2](https://github.com/docker/distribution/blob/master/doc/SPEC.md). Typically, +multiple image storage services are used in order to provide greater uptime and +faster, more efficient resource utilization. + +## DHE system management + +The `dockerhubenterprise/manager` image is used to control the DHE system. 
This +image uses the Docker socket to orchestrate the multiple services that comprise +DHE. + + $ sudo bash -c "$(sudo docker run dockerhubenterprise/manager [COMMAND])" + +Supported commands are: `install`, `start`, `stop`, `restart`, `status`, and +`upgrade`. + +> **Note**: `sudo` is needed for `dockerhubenterprise/manager` commands to +> ensure that the Bash script is run with full access to the Docker host. + +## Next Steps + +For information on installing DHE, take a look at the [Installation instructions](./install.md). diff --git a/docs/sources/docker-hub-enterprise/assets/admin-logs.png b/docs/sources/docker-hub-enterprise/assets/admin-logs.png new file mode 100644 index 0000000000000..76f0d19a80ce9 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-logs.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-metrics.png b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png new file mode 100644 index 0000000000000..ccec72a31a199 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-metrics.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png new file mode 100644 index 0000000000000..ef9dfe3513154 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-basic.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png new file mode 100644 index 0000000000000..112a15c361681 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication-ldap.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png new file mode 100644 index 
0000000000000..83cba1287c121 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-authentication.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png new file mode 100644 index 0000000000000..27ce98b27d265 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-http-unlicensed.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png new file mode 100644 index 0000000000000..d860c5088d003 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-http.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png new file mode 100644 index 0000000000000..488f212008c0c Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-license.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png new file mode 100644 index 0000000000000..81d375040e0cd Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-security.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png new file mode 100644 index 0000000000000..9aea039c683c8 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/admin-settings-storage.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/admin-settings.png b/docs/sources/docker-hub-enterprise/assets/admin-settings.png new file mode 100644 index 0000000000000..699e722e93c19 Binary files /dev/null and 
b/docs/sources/docker-hub-enterprise/assets/admin-settings.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/console-pull.png b/docs/sources/docker-hub-enterprise/assets/console-pull.png new file mode 100755 index 0000000000000..57f264f4ead39 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/console-pull.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/console-push.png b/docs/sources/docker-hub-enterprise/assets/console-push.png new file mode 100755 index 0000000000000..25acdc18c3e80 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/console-push.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png new file mode 100644 index 0000000000000..49dbfab8745d2 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license-CSDE-dropdown.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png new file mode 100644 index 0000000000000..3c70b747c6bc3 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/docker-hub-org-enterprise-license.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png new file mode 100755 index 0000000000000..667e98b55bcb2 Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/jenkins-plugins.png differ diff --git a/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png new file mode 100755 index 0000000000000..6c8bd5f722bef Binary files /dev/null and b/docs/sources/docker-hub-enterprise/assets/jenkins-ui.png differ diff --git 
a/docs/sources/docker-hub-enterprise/configuration.md b/docs/sources/docker-hub-enterprise/configuration.md new file mode 100644 index 0000000000000..6050da401a9f2 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/configuration.md @@ -0,0 +1,311 @@ +page_title: Docker Hub Enterprise: Configuration options +page_description: Configuration instructions for Docker Hub Enterprise +page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry + +# Configuration options + +This page will help you properly configure Docker Hub Enterprise (DHE) so it can +run in your environment. + +Start with DHE loaded in your browser and click the "Settings" tab to view +configuration options. You'll see options for configuring: + +* Domains and ports +* Security settings +* Storage settings +* Authentication settings +* Your DHE license + +## Domains and Ports + +![Domain and Ports page](../assets/admin-settings-http.png) + +* *Domain Name*: **required**; defaults to an empty string, the fully qualified domain name assigned to the DHE host. +* *Load Balancer HTTP Port*: defaults to 80, used as the entry point for the image storage service. To see load balancer status, you can query +http://<dhe-host>/load_balancer_status. +* *Load Balancer HTTPS Port*: defaults to 443, used as the secure entry point +for the image storage service. +* *HTTP_PROXY*: defaults to an empty string, proxy server for HTTP requests. +* *HTTPS_PROXY*: defaults to an empty string, proxy server for HTTPS requests. +* *NO_PROXY*: defaults to an empty string, proxy bypass for HTTP and HTTPS requests. + + +> **Note**: If you need DHE to re-generate a self-signed certificate at some +> point, you'll need to first delete `/usr/local/etc/dhe/ssl/server.pem`, and +> then restart the DHE containers, either by changing and saving the "Domain Name", +> or using `bash -c "$(docker run dockerhubenterprise/manager restart)"`. 
+ + +## Security + +![Security settings page](../assets/admin-settings-security.png) + +* *SSL Certificate*: Used to enter the hash (string) from the SSL Certificate. +This cert must be accompanied by its private key, entered below. +* *Private Key*: The hash from the private key associated with the provided +SSL Certificate (as a standard x509 key pair). + +In order to run, DHE requires encrypted communications via HTTPS/SSL between (a) the DHE registry and your Docker Engine(s), and (b) between your web browser and the DHE admin server. There are a few options for setting this up: + +1. You can use the self-signed certificate DHE generates by default. +2. You can generate your own certificates using a public service or your enterprise's infrastructure. See the [Generating SSL certificates](#generating-ssl-certificates) section for the options available. + +If you are generating your own certificates, you can install them by following the instructions for +[Adding your own registry certificates to DHE](#adding-your-own-registry-certificates-to-dhe). + +On the other hand, if you choose to use the DHE-generated certificates, or the +certificates you generate yourself are not trusted by your client Docker hosts, +you will need to do one of the following: + +* [Install a registry certificate on all of your client Docker daemons](#installing-registry-certificates-on-client-docker-daemons), + +* Set your [client Docker daemons to run with an unconfirmed connection to the registry](#if-you-cant-install-the-certificates). + +### Generating SSL certificates + +There are three basic approaches to generating certificates: + +1. Most enterprises will have private key infrastructure (PKI) in place to +generate keys. Consult with your security team or whomever manages your private +key infrastructure. If you have this resource available, Docker recommends you +use it. + +2. 
If your enterprise can't provide keys, you can use a public Certificate +Authority (CA) like "InstantSSL.com" or "RapidSSL.com" to generate a +certificate. If your certificates are generated using a globally trusted +Certificate Authority, you won't need to install them on all of your +client Docker daemons. + +3. Use the self-signed registry certificate generated by DHE, and install it +onto the client Docker daemon hosts as shown below. + +### Adding your own Registry certificates to DHE + +Whichever method you use to generate certificates, once you have them +you can set up your DHE server to use them by navigating to the "Settings" page, +going to "Security," and putting the SSL Certificate text (including all +intermediate Certificates, starting with the host) into the +"SSL Certificate" edit box, and the previously generated Private key into +the "SSL Private Key" edit box. + +Click the "Save" button, and then wait for the DHE Admin site to restart and +reload. It should now be using the new certificate. + +Once the "Security" page has reloaded, it will show `#` hashes instead of the +certificate text you pasted in. + +If your certificate is signed by a chain of Certificate Authorities that are +already trusted by your Docker daemon servers, you can skip the "Installing +registry certificates" step below. + +### Installing Registry certificates on client Docker daemons + +If your certificates do not have a trusted Certificate Authority, you will need +to install them on each client Docker daemon host. + +The procedure for installing the DHE certificates on each Linux distribution has +slightly different steps, as shown below. 
+ +You can test this certificate using `curl`: + +``` +$ curl https://dhe.yourdomain.com/v2/ +curl: (60) SSL certificate problem: self signed certificate +More details here: http://curl.haxx.se/docs/sslcerts.html + +curl performs SSL certificate verification by default, using a "bundle" + of Certificate Authority (CA) public keys (CA certs). If the default + bundle file isn't adequate, you can specify an alternate file + using the --cacert option. +If this HTTPS server uses a certificate signed by a CA represented in + the bundle, the certificate verification probably failed due to a + problem with the certificate (it might be expired, or the name might + not match the domain name in the URL). +If you'd like to turn off curl's verification of the certificate, use + the -k (or --insecure) option. + +$ curl --cacert /usr/local/etc/dhe/ssl/server.pem https://dhe.yourdomain.com/v2/ +{"errors":[{"code":"UNAUTHORIZED","message":"access to the requested resource is not authorized","detail":null}]} +``` + +Continue by following the steps corresponding to your chosen OS. + +#### Ubuntu/Debian + +``` + $ export DOMAIN_NAME=dhe.yourdomain.com + $ openssl s_client -connect $DOMAIN_NAME:443 -showcerts /dev/null | openssl x509 -outform PEM | tee /usr/local/share/ca-certificates/$DOMAIN_NAME.crt + $ update-ca-certificates + Updating certificates in /etc/ssl/certs... 1 added, 0 removed; done. + Running hooks in /etc/ca-certificates/update.d....done. 
+ $ service docker restart + docker stop/waiting + docker start/running, process 29291 +``` + +#### RHEL + +``` + $ export DOMAIN_NAME=dhe.yourdomain.com + $ openssl s_client -connect $DOMAIN_NAME:443 -showcerts /dev/null | openssl x509 -outform PEM | tee /etc/pki/ca-trust/source/anchors/$DOMAIN_NAME.crt + $ update-ca-trust + $ /bin/systemctl restart docker.service +``` + +#### Boot2Docker 1.6.0 + +Install the CA cert (or the auto-generated cert) by adding the following to +your `/var/lib/boot2docker/bootsync.sh`: + +``` +#!/bin/sh + +cat /var/lib/boot2docker/server.pem >> /etc/ssl/certs/ca-certificates.crt +``` + + +Then get the certificate from the new DHE server using: + +``` +$ openssl s_client -connect dhe.yourdomain.com:443 -showcerts /dev/null | openssl x509 -outform PEM | sudo tee -a /var/lib/boot2docker/server.pem +``` + +If your certificate chain is complicated, you may want to use the changes in +[Pull request 807](https://github.com/boot2docker/boot2docker/pull/807/files) + +Now you can either reboot your Boot2Docker virtual machine, or run the following to +install the server certificate, and then restart the Docker daemon. + +``` +$ sudo chmod 755 /var/lib/boot2docker/bootsync.sh +$ sudo /var/lib/boot2docker/bootsync.sh +$ sudo /etc/init.d/docker restart`. +``` + +### If you can't install the certificates + +If for some reason you can't install the certificate chain on a client Docker host, +or your certificates do not have a global CA, you can configure your Docker daemon to run in "insecure" mode. This is done by adding an extra flag, +`--insecure-registry host-ip|domain-name`, to your client Docker daemon startup flags. +You'll need to restart the Docker daemon for the change to take effect. + +This flag means that the communications between your Docker client and the DHE +Registry server are still encrypted, but the client Docker daemon is not +confirming that the Registry connection is not being hijacked or diverted. 
+ +> **Note**: If you enter a "Domain Name" into the "Security" settings, it needs +> to be DNS resolvable on any client Docker daemons that are running in +> "insecure-registry" mode. + +To set the flag, follow the directions below for your operating system. + +#### Ubuntu + +On Ubuntu 14.04 LTS, you customize the Docker daemon configuration with the +`/etc/defaults/docker` file. + +Open or create the `/etc/defaults/docker` file, and add the +`--insecure-registry` flag to the `DOCKER_OPTS` setting (which may need to be +added or uncommented) as follows: + +``` +DOCKER_OPTS="--insecure-registry dhe.yourdomain.com" +``` + +Then restart the Docker daemon with `sudo service docker restart`. + +#### RHEL + +On RHEL, you customize the Docker daemon configuration with the +`/etc/sysconfig/docker` file. + +Open or create the `/etc/sysconfig/docker` file, and add the +`--insecure-registry` flag to the `OPTIONS` setting (which may need to be +added or uncommented) as follows: + +``` +OPTIONS="--insecure-registry dhe.yourdomain.com" +``` + +Then restart the Docker daemon with `sudo service docker restart`. + +### Boot2Docker + +On Boot2Docker, you customize the Docker daemon configuration with the +`/var/lib/boot2docker/profile` file. + +Open or create the `/var/lib/boot2docker/profile` file, and add an `EXTRA_ARGS` +setting as follows: + +``` +EXTRA_ARGS="--insecure-registry dhe.yourdomain.com" +``` + +Then restart the Docker daemon with `sudo /etc/init.d/docker restart`. + +## Image Storage Configuration + +DHE offers multiple methods for image storage, which are defined using specific +storage drivers. Image storage can be local, remote, or on a cloud service such +as S3. Storage drivers can be added or customized via the DHE storage driver +API. + +![Storage settings page](../assets/admin-settings-storage.png) + +* *Yaml configuration file*: This file (`/usr/local/etc/dhe/storage.yml`) is +used to configure the image storage services. 
The editable text of the file is +displayed in the dialog box. The schema of this file is identical to that used +by the [Registry 2.0](http://docs.docker.com/registry/configuration/). +* If you are using the file system driver to provide local image storage, you will need to specify a root directory which will get mounted as a sub-path of +`/var/local/dhe/image-storage`. The default value of this root directory is +`/local`, so the full path to it is `/var/local/dhe/image-storage/local`. + +> **Note:** +> Saving changes you've made to settings will restart the Docker Hub Enterprise +> instance. The restart may cause a brief interruption for users of the image +> storage system. + +## Authentication + +The current authentication methods are `None`, `Basic` and `LDAP`. + +The `Basic` setting includes: + +![Basic authentication settings page](../assets/admin-settings-authentication-basic.png) + +* A button to add one user, or to upload a CSV file containing username, +password pairs +* A DHE website Administrator Filter, allowing you to either +* * 'Allow all authenticated users' to log into the DHE admin web interface, or +* * 'Whitelist usernames', which allows you to restrict access to the web +interface to the listed set of users. 
+ +The `LDAP` setting includes: + +![LDAP authentication settings page](../assets/admin-settings-authentication-ldap.png) + +* *Use StartTLS*: defaults to unchecked, check to enable StartTLS +* *LDAP Server URL*: **required**; defaults to null, LDAP server URL (e.g., - ldap://example.com) +* *User Base DN*: **required**; defaults to null, user base DN in the form +(e.g., - dc=example,dc=com) +* *User Login Attribute*: **required**; defaults to null, user login attribute +(e.g., - uid or sAMAccountName) +* *Search User DN*:** required**; defaults to null, search user DN +(e.g., - domain\username) +* *Search User Password*: **required**; defaults to null, search user password +* A *DHE Registry User filter*, allowing you to either +* * 'Allow all authenticated users' to push or pull any images, or +* * 'Filter LDAP search results', which allows you to restrict DHE registry pull +and push to users matching the LDAP filter, +* * 'Whitelist usernames', which allows you to restrict DHE registry pull and +push to the listed set of users. +* A *DHE website Administrator filter*, allowing you to either +* * 'Allow all authenticated users' to log into the DHE admin web interface, or +* * 'Filter LDAP search results', which allows you to restrict DHE admin web access to users matching the LDAP filter, +* * 'Whitelist usernames', which allows you to restrict access to the web interface to the listed set of users. + +## Next Steps + +For information on getting support for DHE, take a look at the +[Support information](./support.md). 
+ diff --git a/docs/sources/docker-hub-enterprise/index.md b/docs/sources/docker-hub-enterprise/index.md new file mode 100644 index 0000000000000..c14bf9280fca3 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/index.md @@ -0,0 +1,50 @@ +page_title: Docker Hub Enterprise: Overview +page_description: Docker Hub Enterprise +page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry + +# Overview + +Docker Hub Enterprise (DHE) lets you run and manage your own Docker image +storage service, securely on your own infrastructure behind your company +firewall. This allows you to securely store, push, and pull the images used by +your enterprise to build, ship, and run applications. DHE also provides +monitoring and usage information to help you understand the workloads being +placed on it. + +Specifically, DHE provides: + +* An image registry to store, manage, and collaborate on Docker images +* Pluggable storage drivers +* Configuration options to let you run DHE in your particular enterprise +environment. +* Easy, transparent upgrades +* Logging, usage and system health metrics + +DHE is perfect for: + +* Providing a secure, on-premise development environment +* Creating a streamlined build pipeline +* Building a consistent, high-performance test/QA environment +* Managing image deployment + +DHE is built on [version 2 of the Docker registry](https://github.com/docker/distribution). + +## Documentation + +The following documentation for DHE is available: + +* **Overview** This page. +* [**Quick Start: Basic User Workflow**](./quick-start.md) Go here to learn the +fundamentals of how DHE works and how you can set up a simple, but useful +workflow. +* [**User Guide**](./userguide.md) Go here to learn about using DHE from day to +day. +* [**Administrator Guide**](./adminguide.md) Go here if you are an administrator +responsible for running and maintaining DHE. 
+* [**Installation**](install.md) Go here for the steps you'll need to install +DHE and get it working. +* [**Configuration**](./configuration.md) Go here to find out details about +setting up and configuring DHE for your particular environment. +* [**Support**](./support.md) Go here for information on getting support for +DHE. + diff --git a/docs/sources/docker-hub-enterprise/install-config.md b/docs/sources/docker-hub-enterprise/install-config.md deleted file mode 100644 index 0b7bcfd6fe633..0000000000000 --- a/docs/sources/docker-hub-enterprise/install-config.md +++ /dev/null @@ -1,8 +0,0 @@ -page_title: Using Docker Hub Enterprise Installation -page_description: Docker Hub Enterprise Installation -page_keywords: docker hub enterprise - -# Docker Hub Enterprise Installation - -Documenation coming soon. - diff --git a/docs/sources/docker-hub-enterprise/install.md b/docs/sources/docker-hub-enterprise/install.md new file mode 100644 index 0000000000000..84f9a321b3810 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/install.md @@ -0,0 +1,312 @@ +page_title: Docker Hub Enterprise: Install +page_description: Installation instructions for Docker Hub Enterprise +page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry + +# Install + +## Overview + +This document describes the process of obtaining, installing, and securing +Docker Hub Enterprise (DHE). DHE is installed from Docker containers. Once +installed, you will need to select a method of securing it. This doc will +explain the options you have for security and help you find the resources needed +to configure it according to your chosen method. More configuration details can +be found in the [DHE Configuration page](./configuration.md). + +Specifically, installation requires completion of these steps, in order: + +1. Acquire a license by purchasing DHE or requesting a trial license. +2. Install the commercially supported Docker Engine. +3. Install DHE +4. 
Add your license to your DHE instance + +## Licensing + +In order to run DHE, you will need to acquire a license, either by purchasing +DHE or requesting a trial license. The license will be associated with your +Docker Hub account or Docker Hub organization (so if you don't have an account, +you'll need to set one up, which can be done at the same time as your license +request). To get your license or start your trial, please contact our +[sales department](mailto:sales@docker.com). Upon completion of your purchase or +request, you will receive an email with further instructions for licensing your +copy of DHE. + +## Prerequisites + +DHE requires the following: + +* Commercially supported Docker Engine 1.6.0 or later running on an +Ubuntu 14.04 LTS, RHEL 7.1 or RHEL 7.0 host. (See below for instructions on how +to install the commercially supported Docker Engine.) + +> **Note:** In order to remain in compliance with your DHE support agreement, +> you must use the current version of commercially supported Docker Engine. +> Running the regular, open source version of Engine is **not** supported. + +* Your Docker daemon needs to be listening to the Unix socket (the default) so +that it can be bind-mounted into the DHE management containers, allowing +DHE to manage itself and its updates. For this reason, your DHE host will also +need internet connectivity so it can access the updates. + +* Your host also needs to have TCP ports `80` and `443` available for the DHE +container port mapping. + +* You will also need the Docker Hub user-name and password used when obtaining +the DHE license (or the user-name of an administrator of the Hub organization +that obtained an Enterprise license). + +## Installing the Commercially Supported Docker Engine + +Since DHE is installed using Docker, the commercially supported Docker Engine +must be installed first. 
This is done with an RPM or DEB repository, which you +set up using a Bash script downloaded from the [Docker Hub](https://hub.docker.com). + +### Download the commercially supported Docker Engine installation script + +To download the commercially supported Docker Engine Bash installation script, +log in to the [Docker Hub](https://hub.docker.com) with the user-name used to +obtain your license . Once you're logged in, go to the +["Enterprise Licenses"](https://registry.hub.docker.com/account/licenses/) page +in your Hub account's "Settings" section. + +Select your intended host operating system from the "Download CS Engine" drop- +down at the top right of the page and then, once the Bash setup script is +downloaded, follow the steps below appropriate for your chosen OS. + +![Docker Hub Docker engine install dropdown](../assets/docker-hub-org-enterprise-license-CSDE-dropdown.png) + +### RHEL 7.0/7.1 installation + +First, copy the downloaded Bash setup script to your RHEL host. Next, run the +following to install commercially supported Docker Engine and its dependencies, +and then start the Docker daemon: + +``` +$ sudo yum update && sudo yum upgrade +$ chmod 755 docker-cs-engine-rpm.sh +$ sudo ./docker-cs-engine-rpm.sh +$ sudo yum install docker-engine-cs +$ sudo systemctl enable docker.service +$ sudo systemctl start docker.service +``` + +In order to simplify using Docker, you can get non-sudo access to the Docker +socket by adding your user to the `docker` group, then logging out and back in +again: + +``` +$ sudo usermod -a -G docker $USER +$ exit +``` + +> **Note**: you may need to reboot your server to update its RHEL kernel. + +### Ubuntu 14.04 LTS installation + +First, copy the downloaded Bash setup script to your Ubuntu host. 
Next, run the +following to install commercially supported Docker Engine and its dependencies: + +``` +$ sudo apt-get update && sudo apt-get upgrade +$ chmod 755 docker-cs-engine-deb.sh +$ sudo ./docker-cs-engine-deb.sh +$ sudo apt-get install docker-engine-cs +``` + +In order to simplify using Docker, you can get non-sudo access to the Docker +socket by adding your user to the `docker` group, then logging out and back in +again: + +``` +$ sudo usermod -a -G docker $USER +$ exit +``` + +> **Note**: you may need to reboot your server to update its LTS kernel. + +## Installing Docker Hub Enterprise + +Once the commercially supported Docker Engine is installed, you can install DHE +itself. DHE is a self-installing application built and distributed using Docker +and the [Docker Hub](https://registry.hub.docker.com/). It is able to restart +and reconfigure itself using the Docker socket that is bind-mounted to its +container. + + +Start installing DHE by running the "dockerhubenterprise/manager" container: + +``` + $ sudo bash -c "$(sudo docker run dockerhubenterprise/manager install)" +``` + +> **Note**: `sudo` is needed for `dockerhubenterprise/manager` commands to +> ensure that the Bash script is run with full access to the Docker host. + +You can also find this command on the "Enterprise Licenses" section of your Hub +user profile. The command will execute a shell script that creates the needed +directories and then runs Docker to pull DHE's images and run its containers. + +Depending on your internet connection, this process may take several minutes to +complete. 
+ +A successful installation will pull a large number of Docker images and should +display output similar to: + +``` +$ sudo bash -c "$(sudo docker run dockerhubenterprise/manager install)" +Unable to find image 'dockerhubenterprise/manager:latest' locally +Pulling repository dockerhubenterprise/manager +c46d58daad7d: Pulling image (latest) from dockerhubenterprise/manager +c46d58daad7d: Pulling image (latest) from dockerhubenterprise/manager +c46d58daad7d: Pulling dependent layers +511136ea3c5a: Download complete +fa4fd76b09ce: Pulling metadata +fa4fd76b09ce: Pulling fs layer +ff2996b1faed: Download complete +... +fd7612809d57: Pulling metadata +fd7612809d57: Pulling fs layer +fd7612809d57: Download complete +c46d58daad7d: Pulling metadata +c46d58daad7d: Pulling fs layer +c46d58daad7d: Download complete +c46d58daad7d: Download complete +Status: Downloaded newer image for dockerhubenterprise/manager:latest +Unable to find image 'dockerhubenterprise/manager:1.0.0_8ce62a61e058' locally +Pulling repository dockerhubenterprise/manager +c46d58daad7d: Download complete +511136ea3c5a: Download complete +fa4fd76b09ce: Download complete +1c8294cc5160: Download complete +117ee323aaa9: Download complete +2d24f826cb16: Download complete +33bfc1956932: Download complete +48f0dd6c9414: Download complete +65c30f72ecb2: Download complete +d4b29764d0d3: Download complete +5654f4fe5384: Download complete +9b9faa6ecd11: Download complete +0c275f56ca5c: Download complete +ff2996b1faed: Download complete +fd7612809d57: Download complete +Status: Image is up to date for dockerhubenterprise/manager:1.0.0_8ce62a61e058 +INFO [1.0.0_8ce62a61e058] Attempting to connect to docker engine dockerHost="unix:///var/run/docker.sock" +INFO [1.0.0_8ce62a61e058] Running install command +<...output truncated...> +Creating container docker_hub_enterprise_load_balancer with docker daemon unix:///var/run/docker.sock +Starting container docker_hub_enterprise_load_balancer with docker daemon 
unix:///var/run/docker.sock +Bringing up docker_hub_enterprise_log_aggregator. +Creating container docker_hub_enterprise_log_aggregator with docker daemon unix:///var/run/docker.sock +Starting container docker_hub_enterprise_log_aggregator with docker daemon unix:///var/run/docker.sock +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +0168f37b6221 dockerhubenterprise/log-aggregator:1.0.0_8ce62a61e058 "log-aggregator" 4 seconds ago Up 4 seconds docker_hub_enterprise_log_aggregator +b51c73bebe8b dockerhubenterprise/nginx:1.0.0_8ce62a61e058 "nginxWatcher" 4 seconds ago Up 4 seconds 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp docker_hub_enterprise_load_balancer +e8327864356b dockerhubenterprise/admin-server:1.0.0_8ce62a61e058 "server" 5 seconds ago Up 5 seconds 80/tcp docker_hub_enterprise_admin_server +52885a6e830a dockerhubenterprise/auth_server:alpha-a5a2af8a555e "garant --authorizat 6 seconds ago Up 5 seconds 8080/tcp +``` + +Once this process completes, you should be able to manage and configure your DHE +instance by pointing your browser to `https:///`. + +Your browser will warn you that this is an unsafe site, with a self-signed, +untrusted certificate. This is normal and expected; allow this connection +temporarily. + +### Setting the DHE Domain Name + +The DHE Administrator site will also warn that the "Domain Name" is not set. Go +to the "Settings" tab, and set the "Domain Name" to the full host-name of your +DHE server. +Hitting the "Save and Restart DHE Server" button will generate a new certificate, which will be used +by both the DHE Administrator web interface and the DHE Registry server. + +After the server restarts, you will again need to allow the connection to the untrusted DHE web admin site. + +![http settings page](../assets/admin-settings-http-unlicensed.png) + +Lastly, you will see a warning notifying you that this instance of DHE is +unlicensed. You'll correct this in the next step. 
+ +### Add your license + +The DHE registry services will not start until you add your license. +To do that, you'll first download your license from the Docker Hub and then +upload it to your DHE web admin server. Follow these steps: + +1. If needed, log back into the [Docker Hub](https://hub.docker.com) + using the user-name you used when obtaining your license. Go to "Settings" (in + the menu under your user-name, top right) to get to your account settings, and + then click on "Enterprise Licenses" in the side bar at left. + +2. You'll see a list of available licenses. Click on the download button to + obtain the license file you'd like to use. + ![Download DHE license](../assets/docker-hub-org-enterprise-license.png) + +3. Next, go to your DHE instance in your browser and click on the Settings tab + and then the "License" tab. Click on the "Upload license file" button, which + will open a standard file browser. Locate and select the license file you + downloaded in step 2, above. Approve the selection to close the dialog. + ![http settings page](../assets/admin-settings-license.png) + +4. Click the "Save and Restart DHE" button, which will quit DHE and then restart it, registering + the new license. + +5. Verify the acceptance of the license by confirming that the "unlicensed copy" +warning is no longer present. + +### Securing DHE + +Securing DHE is **required**. You will not be able to push or pull from DHE until you secure it. + +There are several options and methods for securing DHE. For more information, +see the [configuration documentation](./configuration.md#security) + +### Using DHE to push and pull images + +Now that you have DHE configured with a "Domain Name" and have your client +Docker daemons configured with the required security settings, you can test your +setup by following the instructions for +[Using DHE to Push and pull images](./userguide.md#using-dhe-to-push-and-pull-images). 
+ +### DHE web interface and registry authentication + +By default, there is no authentication set on either the DHE web admin +interface or the DHE registry. You can restrict access using an in-DHE +configured set of users (and passwords), or you can configure DHE to use LDAP- +based authentication. + +See [DHE Authentication settings](./configuration.md#authentication) for more +details. + +# Upgrading + +DHE has been designed to allow on-the-fly software upgrades. Start by +clicking on the "System Health" tab. In the upper, right-hand side of the +dashboard, below the navigation bar, you'll see the currently installed version +(e.g., `Current Version: 0.1.12345`). + +If your DHE instance is the latest available, you will also see the message: +"System Up to Date." + +If there is an upgrade available, you will see the message "System Update +Available!" alongside a button labeled "Update to Version X.XX". To upgrade, DHE +will pull new DHE container images from the Docker Hub. If you have not already +connected to Docker Hub, DHE will prompt you to log in. + +The upgrade process requires a small amount of downtime to complete. To complete +the upgrade, DHE will: +* Connect to the Docker Hub to pull new container images with the new version of +DHE. +* Deploy those containers +* Shut down the old containers +* Resolve any necessary links/urls. + +Assuming you have a decent internet connection, the entire upgrade process +should complete within a few minutes. + +## Next Steps + +For information on configuring DHE for your environment, take a look at the +[Configuration instructions](./configuration.md). 
+ diff --git a/docs/sources/docker-hub-enterprise/quick-start.md b/docs/sources/docker-hub-enterprise/quick-start.md new file mode 100644 index 0000000000000..a813deb076c3f --- /dev/null +++ b/docs/sources/docker-hub-enterprise/quick-start.md @@ -0,0 +1,308 @@ +page_title: Docker Hub Enterprise: Quick-start: Basic Workflow +page_description: Brief tutorial on the basics of Docker Hub Enterprise user workflow +page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, image, repository + + +# Docker Hub Enterprise Quick Start: Basic User Workflow + +## Overview + +This Quick Start Guide will give you a hands-on look at the basics of using +Docker Hub Enterprise (DHE), Docker’s on-premise image storage application. +This guide will walk you through using DHE to complete a typical, and critical, +part of building a development pipeline: setting up a Jenkins instance. Once you +complete the task, you should have a good idea of how DHE works and how it might +be useful to you. + +Specifically, this guide demonstrates the process of retrieving the +[official Docker image for Jenkins](https://registry.hub.docker.com/_/jenkins/), +customizing it to suit your needs, and then hosting it on your private instance +of DHE located inside your enterprise’s firewalled environment. Your developers +will then be able to retrieve the custom Jenkins image in order to use it to +build CI/CD infrastructure for their projects, no matter the platform they’re +working from, be it a laptop, a VM, or a cloud provider. + +The guide will walk you through the following steps: + +1. Pulling the official Jenkins image from the public Docker Hub +2. Customizing the Jenkins image to suit your needs +3. Pushing the customized image to DHE +4. Pulling the customized image from DHE +4. Launching a container from the custom image +5. Using the new Jenkins container + +You should be able to complete this guide in about thirty minutes. 
+ +> **Note:** This guide assumes you have installed a working instance of DHE +> reachable at dhe.yourdomain.com. If you need help installing and configuring +> DHE, please consult the +[installation instructions](./install.md). + + +## Pulling the official Jenkins image + +> **Note:** This guide assumes you are familiar with basic Docker concepts such +> as images, containers, and registries. If you need to learn more about Docker +> fundamentals, please consult the +> [Docker user guide](http://docs.docker.com/userguide/). + +First, you will retrieve a copy of the official Jenkins image from the Docker Hub. From the CLI of a machine running the Docker Engine on your network, use +the +[`docker pull`](https://docs.docker.com/reference/commandline/cli/#pull) +command to pull the public Jenkins image. + + $ docker pull jenkins + +> **Note:** This guide assumes you can run Docker commands from a machine where +> you are a member of the `docker` group, or have root privileges. Otherwise, you may +> need to add `sudo` to the example commands below. + +Docker will start the process of pulling the image from the Hub. Once it has completed, the Jenkins image should be visible in the output of a [`docker images`](https://docs.docker.com/reference/commandline/cli/#images) command: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + jenkins latest 1a7cc22b0ee9 6 days ago 662 MB + +> **Note:** Because the `pull` command did not specify any tags, it will pull +> the latest version of the public Jenkins image. If your enterprise environment +> requires you to use a specific version, add the tag for the version you need +> (e.g., `jenkins:1.565`). + +## Customizing the Jenkins image + +Now that you have a local copy of the Jenkins image, you’ll customize it so that +the containers it builds will integrate with your infrastructure. To do this, +you’ll create a custom Docker image that adds a Jenkins plugin that provides +fine grained user management. 
You’ll also configure Jenkins to be more secure by +disabling HTTP access and forcing it to use HTTPS. +You’ll do this by using a `Dockerfile` and the `docker build` command. + +> **Note:** These are obviously just a couple of examples of the many ways you +> can modify and configure Jenkins. Feel free to add or substitute whatever +> customization is necessary to run Jenkins in your environment. + +### Creating a `build` context + +In order to add the new plugin and configure HTTPS access to the custom Jenkins +image, you need to: + +1. Create a text file that defines the new plugin +2. Create copies of the private key and certificate + +All of the above files need to be in the same directory as the Dockerfile you +will create in the next step. + +1. Create a build directory called `build`, and change to that new directory: + + $ mkdir build && cd build + +In this directory, create a new file called `plugins` and add the following +line: + + role-strategy:2.2.0 + +(The plugin version used above was the latest version at the time of writing.) + +2. You will also need to make copies of the server’s private key and certificate. Give the copies the following names — `https.key` and `https.pem`. + +> **Note:** Because creating new keys varies widely by platform and +> implementation, this guide won’t cover key generation. We assume you have +> access to existing keys. If you don’t have access, or can’t generate keys +> yourself, feel free to skip the steps involving them and HTTPS config. The +> guide will still walk you through building a custom Jenkins image and pushing +> and pulling that image using DHE.
+ +### Creating a Dockerfile + +In the same directory as the `plugins` file and the private key and certificate, +create a new [`Dockerfile`](https://docs.docker.com/reference/builder/) with the +following contents: + + FROM jenkins + + #New plugins must be placed in the plugins file + COPY plugins /usr/share/jenkins/plugins + + #The plugins.sh script will install new plugins + RUN /usr/local/bin/plugins.sh /usr/share/jenkins/plugins + + #Copy private key and cert to image + COPY https.pem /var/lib/jenkins/cert + COPY https.key /var/lib/jenkins/pk + + #Configure HTTP off and HTTPS on, using port 1973 + ENV JENKINS_OPTS --httpPort=-1 --httpsPort=1973 --httpsCertificate=/var/lib/jenkins/cert --httpsPrivateKey=/var/lib/jenkins/pk + +The first `COPY` instruction in the above will copy the `plugins` file created +earlier into the `/usr/share/jenkins` directory within the custom image you are +defining with the `Dockerfile`. + +The `RUN` instruction will execute the `/usr/local/bin/plugins.sh` script with +the newly copied `plugins` file, which will install the listed plugin. + +The next two `COPY` instructions copy the server’s private key and certificate +into the required directories within the new image. + +The `ENV` instruction creates an environment variable called `JENKINS_OPTS` in +the image you are about to create. This environment variable will be present in +any containers launched from the image and contains the required settings to +tell Jenkins to disable HTTP and operate over HTTPS. + +> **Note:** You can specify any valid port number as part of the `JENKINS_OPTS` +> environment variable declared above. The value `1973` used in the example is +> arbitrary. + +The `Dockerfile`, the `plugins` file, as well as the private key and +certificate, must all be in the same directory because the `docker build` +command uses the directory that contains the `Dockerfile` as its “build +context”.
Only files contained within that “build context” will be included in +the image being built. + +### Building your custom image + +Now that the `Dockerfile`, the `plugins` file, and the files required for HTTPS +operation are created in your current working directory, you can build your +custom image using the +[`docker build` command](https://docs.docker.com/reference/commandline/cli/#build): + + docker build -t dhe.yourdomain.com/ci-infrastructure/jnkns-img . + +> **Note:** Don’t miss the period (`.`) at the end of the command above. This +> tells the `docker build` command to use the current working directory as the +> "build context". + +This command will build a new Docker image called `jnkns-img` which is based on +the public Jenkins image you pulled earlier, but contains all of your +customization. + +Please note the use of the `-t` flag in the `docker build` command above. The +`-t` flag lets you tag an image so it can be pushed to a custom repository. In +the example above, the new image is tagged so it can be pushed to the +`ci-infrastructure` Repository within the `dhe.yourdomain.com` registry (your +local DHE instance). This will be important when you need to `push` the +customized image to DHE later. 
+ +A `docker images` command will now show the custom image alongside the Jenkins +image pulled earlier: + + $ sudo docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + dhe.yourdomain.com/ci-infrastructure/jnkns-img latest fc0ab3008d40 2 minutes ago 674.5 MB + jenkins latest 1a7cc22b0ee9 6 days ago 662 MB + +## Pushing to Docker Hub Enterprise + +Now that you’ve created the custom image, it can be pushed to DHE using the +[`docker push` command](https://docs.docker.com/reference/commandline/cli/#push): + + $ docker push dhe.yourdomain.com/ci-infrastructure/jnkns-img + 511136ea3c5a: Image successfully pushed + 848d84b4b2ab: Image successfully pushed + 71d9d77ae89e: Image already exists + + 492ed3875e3e: Image successfully pushed + fc0ab3008d40: Image successfully pushed + +You can view the traffic throughput while the custom image is being pushed from +the `System Health` tab in DHE: + +![DHE console push throughput](../assets/console-push.png) + +Once the image is successfully pushed, it can be downloaded, or pulled, by any +Docker host that has access to DHE. + +## Pulling from Docker Hub Enterprise +To pull the `jnkns-img` image from DHE, run the +[`docker pull`](https://docs.docker.com/reference/commandline/cli/#pull) +command from any Docker Host that has access to your DHE instance: + + $ docker pull dhe.yourdomain.com/ci-infrastructure/jnkns-img + latest: Pulling from dhe.yourdomain.com/ci-infrastructure/jnkns-img + 511136ea3c5a: Pull complete + 848d84b4b2ab: Pull complete + 71d9d77ae89e: Pull complete + + 492ed3875e3e: Pull complete + fc0ab3008d40: Pull complete + dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
+ Status: Downloaded newer image for dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest + +You can view the traffic throughput while the custom image is being pulled from +the `System Health` tab in DHE: + +![DHE console pull throughput](../assets/console-pull.png) + +Now that the `jnkns-img` image has been pulled locally from DHE, you can view it +in the output of the `docker images` command: + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + dhe.yourdomain.com/ci-infrastructure/jnkns-img latest fc0ab3008d40 8 minutes ago 674.5 MB + +## Launching a custom Jenkins container + +Now that you’ve successfully pulled the customized Jenkins image from DHE, you +can create a container from it with the +[`docker run` command](https://docs.docker.com/reference/commandline/cli/#run): + + + $ docker run -p 1973:1973 --name jenkins01 dhe.yourdomain.com/ci-infrastructure/jnkns-img + /usr/share/jenkins/ref/init.groovy.d/tcp-slave-angent-port.groovy + /usr/share/jenkins/ref/init.groovy.d/tcp-slave-angent-port.groovy -> init.groovy.d/tcp-slave-angent-port.groovy + copy init.groovy.d/tcp-slave-angent-port.groovy to JENKINS_HOME + /usr/share/jenkins/ref/plugins/role-strategy.hpi + /usr/share/jenkins/ref/plugins/role-strategy.hpi -> plugins/role-strategy.hpi + copy plugins/role-strategy.hpi to JENKINS_HOME + /usr/share/jenkins/ref/plugins/dockerhub.hpi + /usr/share/jenkins/ref/plugins/dockerhub.hpi -> plugins/dockerhub.hpi + copy plugins/dockerhub.hpi to JENKINS_HOME + + INFO: Jenkins is fully up and running + +> **Note:** The `docker run` command above maps port 1973 in the container +> through to port 1973 on the host. This is the HTTPS port you specified in the +> Dockerfile earlier. If you specified a different HTTPS port in your +> Dockerfile, you will need to substitute this with the correct port numbers for +> your environment. 
+ +You can view the newly launched container, called `jenkins01`, using the +[`docker ps` command](https://docs.docker.com/reference/commandline/cli/#ps): + + $ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS ...PORTS NAMES + 2e5d2f068504 dhe.yourdomain.com/ci-infrastructure/jnkns-img:latest "/usr/local/bin/jenk About a minute ago Up About a minute 50000/tcp, 0.0.0.0:1973->1973/tcp jenkins01 + + +## Accessing the new Jenkins container + +The previous `docker run` command mapped port `1973` on the container to port +`1973` on the Docker host, so the Jenkins Web UI can be accessed at +`https://<docker host>:1973` (Don’t forget the `s` at the end of `https`.) + +> **Note:** If you are using a self-signed certificate, you may get a security +> warning from your browser telling you that the certificate is self-signed and +> not trusted. You may wish to add the certificate to the trusted store in order +> to prevent further warnings in the future. + +![Jenkins landing page](../assets/jenkins-ui.png) + +From within the Jenkins Web UI, navigate to `Manage Jenkins` (on the left-hand +pane) > `Manage Plugins` > `Installed`. The `Role-based Authorization Strategy` +plugin should be present with the `Uninstall` button available to the right. + +![Jenkins plugin manager](../assets/jenkins-plugins.png) + +In another browser session, try to access Jenkins via the default HTTP port 8080 +— `http://<docker host>:8080`. This should result in a “connection timeout,” +showing that Jenkins is not available on its default port 8080 over HTTP. + +This demonstration shows your Jenkins image has been configured correctly for +HTTPS access, your new plugin was added and is ready for use, and HTTP access +has been disabled. At this point, any member of your team can use `docker pull` +to access the image from your DHE instance, allowing them to access a +configured, secured Jenkins instance that can run on any infrastructure.
+ +## Next Steps + +For more information on using DHE, take a look at the +[User's Guide](./userguide.md). diff --git a/docs/sources/docker-hub-enterprise/support.md b/docs/sources/docker-hub-enterprise/support.md new file mode 100644 index 0000000000000..ed60748a3a067 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/support.md @@ -0,0 +1,14 @@ +page_title: Docker Hub Enterprise: Support +page_description: Commercial Support +page_keywords: docker, documentation, about, technology, understanding, enterprise, hub, registry, support + +# Commercial Support + +Purchasing a DHE License or Commercial Support subscription means your questions +and issues about DHE will receive prioritized support. +You can file a ticket through [email](mailto:support@docker.com) from your +company email address, or visit our [support site](https://support.docker.com). +In either case, you'll need to verify your email address, and then you can +communicate with the support team either by email or web interface. + +**The availability of support depends on your [support subscription](https://www.docker.com/enterprise/support/)** diff --git a/docs/sources/docker-hub-enterprise/usage.md b/docs/sources/docker-hub-enterprise/usage.md deleted file mode 100644 index 252223ef7038c..0000000000000 --- a/docs/sources/docker-hub-enterprise/usage.md +++ /dev/null @@ -1,9 +0,0 @@ -page_title: Using Docker Hub Enterprise -page_description: Docker Hub Enterprise -page_keywords: docker hub enterprise - -# Docker Hub Enterprise - -Documenation coming soon. 
- - diff --git a/docs/sources/docker-hub-enterprise/userguide.md b/docs/sources/docker-hub-enterprise/userguide.md new file mode 100644 index 0000000000000..6d329722de8b6 --- /dev/null +++ b/docs/sources/docker-hub-enterprise/userguide.md @@ -0,0 +1,130 @@ +page_title: Docker Hub Enterprise: User guide +page_description: Documentation describing basic use of Docker Hub Enterprise +page_keywords: docker, documentation, about, technology, hub, enterprise + + +# Docker Hub Enterprise User's Guide + +This guide covers tasks and functions a user of Docker Hub Enterprise (DHE) will +need to know about, such as pushing or pulling images, etc. For tasks DHE +administrators need to accomplish, such as configuring or monitoring DHE, please +visit the [Administrator's Guide](./adminguide.md). + +## Using DHE to push and pull images + +The primary use case for DHE users is to push and pull images to and from the +DHE image storage service. The following instructions describe these procedures. + +> **Note**: If your DHE instance has authentication enabled, you will need to +>use your command line to `docker login ` (e.g., `docker login +> dhe.yourdomain.com`). +> +> Failures due to unauthenticated `docker push` and `docker pull` commands will +> look like : +> +> $ docker pull dhe.yourdomain.com/hello-world +> Pulling repository dhe.yourdomain.com/hello-world +> FATA[0001] Error: image hello-world:latest not found +> +> $ docker push dhe.yourdomain.com/hello-world +> The push refers to a repository [dhe.yourdomain.com/hello-world] (len: 1) +> e45a5af57b00: Image push failed +> FATA[0001] Error pushing to registry: token auth attempt for registry https://dhe.yourdomain.com/v2/: https://> dhe.yourdomain.com/auth/v2/token/?scope=repository%3Ahello-world%3Apull%2Cpush&service=dhe.yourdomain.com > request failed with status: 401 Unauthorized + + +1. Pull the `hello-world` official image from the Docker Hub. 
By default, if +Docker can't find an image locally, it will attempt to pull the image from the +Docker Hub. + + `$ docker pull hello-world` + +2. List your available images. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + hello-world latest e45a5af57b00 3 months ago 910 B + + Your list should include the `hello-world` image from the earlier run. + +3. Re-tag the `hello-world` image so that it refers to your DHE server. + + `$ docker tag hello-world:latest dhe.yourdomain.com/demouser/hello-mine:latest` + + The command labels a `hello-world:latest` image using a new tag in the + `[REGISTRYHOST/][USERNAME/]NAME[:TAG]` format. The `REGISTRYHOST` in this + case is the DHE server, `dhe.yourdomain.com`, and the `USERNAME` is + `demouser`. + +4. List your new image. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + hello-world latest e45a5af57b00 3 months ago 910 B + dhe.yourdomain.com/demouser/hello-mine latest e45a5af57b00 3 months ago 910 B + + You should see your new image label in the listing, with the same `IMAGE ID` + as the Official image. + +5. Push this new image to your DHE server. + + `$ docker push dhe.yourdomain.com/demouser/hello-mine:latest` + +6. Set up a test of DHE by removing all images from your local environment: + + `$ docker rmi -f $(docker images -q -a)` + + This command is for illustrative purposes only: removing the image forces + any subsequent `run` to pull from a remote registry (such as DHE) rather + than from a local cache. If you run `docker images` after this you should + not see any instance of `hello-world` or `hello-mine` in your images list. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + +7. Try running `hello-mine`. + + $ docker run hello-mine + Unable to find image 'hello-mine:latest' locally + Pulling repository hello-mine + FATA[0007] Error: image library/hello-mine:latest not found + + The `run` command fails because your new image doesn't exist on the Docker Hub. + +8. 
Run `hello-mine` again, this time pointing it to pull from DHE: + + $ docker run dhe.yourdomain.com/demouser/hello-mine + latest: Pulling from dhe.yourdomain.com/demouser/hello-mine + 511136ea3c5a: Pull complete + 31cbccb51277: Pull complete + e45a5af57b00: Already exists + Digest: sha256:45f0de377f861694517a1440c74aa32eecc3295ea803261d62f950b1b757bed1 + Status: Downloaded newer image for dhe.yourdomain.com/demouser/hello-mine:latest + + If you run `docker images` after this you'll see a `hello-mine` image. + + $ docker images + REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE + dhe.yourdomain.com/demouser/hello-mine latest e45a5af57b00 3 months ago 910 B + +> **Note**: If the Docker daemon on which you are running `docker push` doesn't +> have the right certificates set up, you will get an error similar to: +> +> $ docker push dhe.yourdomain.com/demouser/hello-world +> FATA[0000] Error response from daemon: v1 ping attempt failed with error: Get https://dhe.yourdomain.com/v1/_ping: x509: certificate signed by unknown authority. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry dhe.yourdomain.com` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/dhe.yourdomain.com/ca.crt + +9. You have now successfully created a custom image, `hello-mine`, tagged it, + and pushed it to the image storage provided by your DHE instance. You then + pulled that image back down from DHE and onto your machine, where you can + use it to create a container containing the "Hello World" application. + +## Next Steps + +For information on administering DHE, take a look at the [Administrator's Guide](./adminguide.md).
+ + + diff --git a/docs/sources/docker-hub/accounts.md b/docs/sources/docker-hub/accounts.md index e4623f99809c3..360eb371f395d 100644 --- a/docs/sources/docker-hub/accounts.md +++ b/docs/sources/docker-hub/accounts.md @@ -4,7 +4,7 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub # Accounts on Docker Hub -## Docker Hub Accounts +## Docker Hub accounts You can `search` for Docker images and `pull` them from [Docker Hub](https://hub.docker.com) without signing in or even having an @@ -12,7 +12,7 @@ account. However, in order to `push` images, leave comments or to *star* a repository, you are going to need a [Docker Hub](https://hub.docker.com) account. -### Registration for a Docker Hub Account +### Registration for a Docker Hub account You can get a [Docker Hub](https://hub.docker.com) account by [signing up for one here](https://hub.docker.com/account/signup/). A valid @@ -32,7 +32,7 @@ If you can't access your account for some reason, you can reset your password from the [*Password Reset*](https://hub.docker.com/account/forgot-password/) page. -## Organizations & Groups +## Organizations and groups Also available on the Docker Hub are organizations and groups that allow you to collaborate across your organization or team. You can see what diff --git a/docs/sources/docker-hub/builds.md b/docs/sources/docker-hub/builds.md index 1613ad1d4b0e7..541bc159462cf 100644 --- a/docs/sources/docker-hub/builds.md +++ b/docs/sources/docker-hub/builds.md @@ -8,20 +8,18 @@ page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub *Automated Builds* are a special feature of Docker Hub which allow you to use [Docker Hub's](https://hub.docker.com) build clusters to automatically -create images from a specified `Dockerfile` and a GitHub or Bitbucket repository -(or "context"). The system will clone your repository and build the image -described by the `Dockerfile` using the repository as the context. 
The -resulting automated image will then be uploaded to the Docker Hub registry -and marked as an *Automated Build*. +create images from a GitHub or Bitbucket repository containing a `Dockerfile` +The system will clone your repository and build the image described by the +`Dockerfile` using the directory the `Dockerfile` is in (and subdirectories) +as the build context. The resulting automated image will then be uploaded +to the Docker Hub registry and marked as an *Automated Build*. Automated Builds have several advantages: * Users of *your* Automated Build can trust that the resulting image was built exactly as specified. - * The `Dockerfile` will be available to anyone with access to -your repository on the Docker Hub registry. - +your repository on the Docker Hub registry. * Because the process is automated, Automated Builds help to make sure that your repository is always up to date. @@ -33,16 +31,26 @@ http://docs.docker.com/userguide/dockerhub/#creating-a-docker-hub-account) and on GitHub and/or Bitbucket. In either case, the account needs to be properly validated and activated before you can link to it. -## Setting up Automated Builds with GitHub - -In order to set up an Automated Build, you need to first link your -[Docker Hub](https://hub.docker.com) account with a GitHub account. +The first time you to set up an Automated Build, your +[Docker Hub](https://hub.docker.com) account will need to be linked to +a GitHub or Bitbucket account. This will allow the registry to see your repositories. -> *Note:* +If you have previously linked your Docker Hub account, and want to view or modify +that link, click on the "Manage - Settings" link in the sidebar, and then +"Linked Accounts" in your Settings sidebar. + +## Automated Builds from GitHub + +If you've previously linked your Docker Hub account to your GitHub account, +you'll be able to skip to the [Creating an Automated Build](#creating-an-automated-build). 
+ +### Linking your Docker Hub account to a GitHub account + +> *Note:* > Automated Builds currently require *read* and *write* access since > [Docker Hub](https://hub.docker.com) needs to setup a GitHub service -> hook. We have no choice here, this is how GitHub manages permissions, sorry! +> hook. We have no choice here, this is how GitHub manages permissions, sorry! > We do guarantee nothing else will be touched in your account. To get started, log into your Docker Hub account and click the @@ -51,17 +59,99 @@ To get started, log into your Docker Hub account and click the Select the [GitHub service](https://registry.hub.docker.com/associate/github/). -Then follow the onscreen instructions to authorize and link your +When linking to GitHub, you'll need to select either "Public and Private", +or "Limited" linking. + +The "Public and Private" option is the easiest to use, +as it grants the Docker Hub full access to all of your repositories. GitHub +also allows you to grant access to repositories belonging to your GitHub +organizations. + +By choosing the "Limited" linking, your Docker Hub account only gets permission +to access your public data and public repositories. + +Follow the onscreen instructions to authorize and link your GitHub account to Docker Hub. Once it is linked, you'll be able to -choose a repo from which to create the Automatic Build. +choose a source repository from which to create the Automatic Build. + +You will be able to review and revoke Docker Hub's access by visiting the +[GitHub User's Applications settings](https://github.com/settings/applications). + +> **Note**: If you delete the GitHub account linkage that is used for one of your +> automated build repositories, the previously built images will still be available. 
+> If you re-link to that GitHub account later, the automated build can be started +> using the "Start Build" button on the Hub, or if the webhook on the GitHub repository +> still exists, will be triggered by any subsequent commits. + +### Auto builds and limited linked GitHub accounts. + +If you selected to link your GitHub account with only a "Limited" link, then +after creating your automated build, you will need to either manually trigger a +Docker Hub build using the "Start a Build" button, or add the GitHub webhook +manually, as described in [GitHub Service Hooks](#github-service-hooks). + +### Changing the GitHub user link + +If you want to remove, or change the level of linking between your GitHub account +and the Docker Hub, you need to do this in two places. + +First, remove the "Linked Account" from your Docker Hub "Settings". +Then go to your GitHub account's Personal settings, and in the "Applications" +section, "Revoke access". + +You can now re-link your account at any time. + +### GitHub organizations + +GitHub organizations and private repositories forked from organizations will be +made available to auto build using the "Docker Hub Registry" application, which +needs to be added to the organization - and then will apply to all users. + +To check, or request access, go to your GitHub user's "Setting" page, select the +"Applications" section from the left side bar, then click the "View" button for +"Docker Hub Registry". + +![Check User access to GitHub](/docker-hub/hub-images/gh-check-user-org-dh-app-access.png) + +The organization's administrators may need to go to the Organization's "Third +party access" screen in "Settings" to Grant or Deny access to the Docker Hub +Registry application. This change will apply to all organization members. 
+ +![Check Docker Hub application access to Organization](/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png) + +More detailed access controls to specific users and GitHub repositories would be +managed using the GitHub People and Teams interfaces. ### Creating an Automated Build You can [create an Automated Build]( https://registry.hub.docker.com/builds/github/select/) from any of your -public or private GitHub repositories with a `Dockerfile`. +public or private GitHub repositories that have a `Dockerfile`. + +Once you've selected the source repository, you can then configure: + +- The Hub user/org the repository is built to - either your Hub account name, +or the name of any Hub organizations your account is in +- The Docker repository name the image is built to +- If the Docker repository should be "Public" or "Private" + You can change the accessibility options after the repository has been created. + If you add a Private repository to a Hub user, then you can only add other users + as collaborators, and those users will be able to view and pull all images in that + repository. To configure more granular access permissions, such as using groups of + users or allow different users access to different image tags, then you need + to add the Private repository to a Hub organization that your user has Administrator + privilege on. +- If you want the GitHub to notify the Docker Hub when a commit is made, and thus trigger + a rebuild of all the images in this automated build. + +You can also select one or more +- The git branch/tag, which repository sub-directory to use as the context +- The Docker image tag name + +You can set a description for the repository by clicking "Description" link in the righthand side bar after the automated build - note that the "Full Description" will be over-written next build from the README.md file. +has been created. 
-### GitHub Submodules +### GitHub private submodules If your GitHub repository contains links to private submodules, you'll get an error message in your build. @@ -114,17 +204,14 @@ can be limited to read-only access to just the repositories required to build. - -### GitHub Organizations -GitHub organizations will appear once your membership to that organization is -made public on GitHub. To verify, you can look at the members tab for your -organization on GitHub. +### GitHub service hooks -### GitHub Service Hooks +The GitHub Service hook allows GitHub to notify the Docker Hub when something has +been committed to that git repository. You will need to add the Service Hook manually +if your GitHub account is "Limited" linked to the Docker Hub. -Follow the steps below to configure the GitHub service -hooks for your Automated Build: +Follow the steps below to configure the GitHub Service hooks for your Automated Build: @@ -146,14 +233,16 @@ hooks for your Automated Build: - - + + + +
Webhooks & Services Click on "Webhooks & Services" on the left side of the page.
3.Find the service labeled DockerFind the service labeled "Docker" and click on it.
4.Activate Service HooksFind the service labeled DockerFind the service labeled "Docker" (or click on "Add service") and click on it.
4.Activate Service Hooks Make sure the "Active" checkbox is selected and click the "Update service" button to save your changes.
-## Setting up Automated Builds with Bitbucket +## Automated Builds with Bitbucket In order to setup an Automated Build, you need to first link your [Docker Hub](https://hub.docker.com) account with a Bitbucket account. @@ -249,7 +338,7 @@ $ curl --data "build=true" -X POST https://registry.hub.docker.com/u/svendowidei OK ``` -> **Note:** +> **Note:** > You can only trigger one build at a time and no more than one > every five minutes. If you already have a build pending, or if you > recently submitted a build request, those requests *will be ignored*. diff --git a/docs/sources/docker-hub/home.md b/docs/sources/docker-hub/home.md index 15baf7b83a39f..3f81208c194fc 100644 --- a/docs/sources/docker-hub/home.md +++ b/docs/sources/docker-hub/home.md @@ -1,8 +1,8 @@ -page_title: The Docker Hub Registry Help +page_title: The Docker Hub Registry help page_description: The Docker Registry help documentation home page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation -# The Docker Hub Registry Help +# The Docker Hub Registry help ## Introduction diff --git a/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png b/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png new file mode 100644 index 0000000000000..0df38c69465dd Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh-check-admin-org-dh-app-access.png differ diff --git a/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png b/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png new file mode 100644 index 0000000000000..13ad6468f6604 Binary files /dev/null and b/docs/sources/docker-hub/hub-images/gh-check-user-org-dh-app-access.png differ diff --git a/docs/sources/docker-hub/index.md b/docs/sources/docker-hub/index.md index c29a5f787314f..3651497e2c212 100644 --- a/docs/sources/docker-hub/index.md +++ b/docs/sources/docker-hub/index.md @@ -1,4 +1,4 @@ -page_title: The Docker Hub Help 
+page_title: The Docker Hub help page_description: The Docker Help documentation home page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, documentation, accounts, organizations, repositories, groups @@ -16,7 +16,7 @@ account and manage your organizations and groups. Find out how to share your Docker images in [Docker Hub repositories](repos/) and how to store and manage private images. -## [Automated Builds](builds/) +## [Automated builds](builds/) Learn how to automate your build and deploy pipeline with [Automated Builds](builds/) diff --git a/docs/sources/docker-hub/official_repos.md b/docs/sources/docker-hub/official_repos.md index 4ec431238bbbc..eb73b4bc20117 100644 --- a/docs/sources/docker-hub/official_repos.md +++ b/docs/sources/docker-hub/official_repos.md @@ -1,189 +1,106 @@ -page_title: Guidelines for Official Repositories on Docker Hub +page_title: Official Repositories on Docker Hub page_description: Guidelines for Official Repositories on Docker Hub page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, docs, official, image, documentation -# Guidelines for Creating and Documenting Official Repositories - -## Introduction - -You’ve been given the job of creating an image for an Official Repository -hosted on [Docker Hub Registry](https://registry.hub.docker.com/). These are -our guidelines for getting that task done. Even if you’re not -planning to create an Official Repo, you can think of these guidelines as best -practices for image creation generally. - -This document consists of two major sections: - -* A list of expected files, resources and supporting items for your image, -along with best practices for creating those items -* Examples embodying those practices - -## Expected Files & Resources - -### A Git repository - -Your image needs to live in a Git repository, preferably on GitHub. 
(If you’d -like to use a different provider, please [contact us](mailto:feedback@docker.com) -directly.) Docker **strongly** recommends that this repo be publicly -accessible. - -If the repo is private or has otherwise limited access, you must provide a -means of at least “read-only” access for both general users and for the -docker-library maintainers, who need access for review and building purposes. - -### A Dockerfile - -Complete information on `Dockerfile`s can be found in the [Reference section](https://docs.docker.com/reference/builder/). -We also have a page discussing [best practices for writing `Dockerfile`s](/articles/dockerfile_best-practices). -Your `Dockerfile` should adhere to the following: - -* It must be written either by using `FROM scratch` or be based on another, -established Official Image. -* It must follow `Dockerfile` best practices. These are discussed on the -[best practices page](/articles/dockerfile_best-practices). In addition, -Docker engineer Michael Crosby has some good tips for `Dockerfiles` in -this [blog post](http://crosbymichael.com/dockerfile-best-practices-take-2.html). - -While [`ONBUILD` triggers](https://docs.docker.com/reference/builder/#onbuild) -are not required, if you choose to use them you should: - -* Build both `ONBUILD` and non-`ONBUILD` images, with the `ONBUILD` image -built `FROM` the non-`ONBUILD` image. -* The `ONBUILD` image should be specifically tagged, for example, `ruby: -latest`and `ruby:onbuild`, or `ruby:2` and `ruby:2-onbuild` - -### A short description - -Include a brief description of your image (in plaintext). Only one description -is required; you don’t need additional descriptions for each tag. The file -should also: - -* Be named `README-short.txt` -* Reside in the repo for the “latest” tag -* Not exceed 100 characters - -### A logo - -Include a logo of your company or the product (png format preferred). Only one -logo is required; you don’t need additional logo files for each tag. 
The logo -file should have the following characteristics: - -* Be named `logo.png` -* Should reside in the repo for the “latest” tag -* Should fit inside a 200px square, maximized in one dimension (preferably the -width) -* Square or wide (landscape) is preferred over tall (portrait), but exceptions -can be made based on the logo needed - -### A long description - -Include a comprehensive description of your image (in Markdown format, GitHub -flavor preferred). Only one description is required; you don’t need additional -descriptions for each tag. The file should also: - -* Be named `README.md` -* Reside in the repo for the “latest” tag -* Be no longer than absolutely necessary, while still addressing all the -content requirements - -In terms of content, the long description must include the following sections: - -* Overview & links -* How-to/usage -* Issues & contributions - -#### Overview & links - -This section should provide: - -* an overview of the software contained in the image, similar to the -introduction in a Wikipedia entry - -* a selection of links to outside resources that help to describe the software - -* a *mandatory* link to the `Dockerfile` - -#### How-to/usage - -A section that describes how to run and use the image, including common use -cases and example `Dockerfile`s (if applicable). Try to provide clear, step-by- -step instructions wherever possible. - -##### Issues & contributions - -In this section, point users to any resources that can help them contribute to -the project. Include contribution guidelines and any specific instructions -related to your development practices. Include a link to -[Docker’s resources for contributors](https://docs.docker.com/contributing/contributing/). -Be sure to include contact info, handles, etc. for official maintainers. - -Also include information letting users know where they can go for help and how -they can file issues with the repo. 
Point them to any specific IRC channels, -issue trackers, contacts, additional “how-to” information or other resources. - -### License - -Include a file, `LICENSE`, of any applicable license. Docker recommends using -the license of the software contained in the image, provided it allows Docker, -Inc. to legally build and distribute the image. Otherwise, Docker recommends -adopting the [Expat license](http://directory.fsf.org/wiki/License:Expat) -(a.k.a., the MIT or X11 license). - -## Examples - -Below are sample short and long description files for an imaginary image -containing Ruby on Rails. - -### Short description - -`README-short.txt` - -`Ruby on Rails is an open-source application framework written in Ruby. It emphasizes best practices such as convention over configuration, active record pattern, and the model-view-controller pattern.` - -### Long description - -`README.md` - -```markdown -# What is Ruby on Rails - -Ruby on Rails, often simply referred to as Rails, is an open source web application framework which runs via the Ruby programming language. It is a full-stack framework: it allows creating pages and applications that gather information from the web server, talk to or query the database, and render templates out of the box. As a result, Rails features a routing system that is independent of the web server. - -> [wikipedia.org/wiki/Ruby_on_Rails](https://en.wikipedia.org/wiki/Ruby_on_Rails) - -# How to use this image - -## Create a `Dockerfile` in your rails app project - - FROM rails:onbuild - -Put this file in the root of your app, next to the `Gemfile`. - -This image includes multiple `ONBUILD` triggers so that should be all that you need for most applications. The build will `ADD . /usr/src/app`, `RUN bundle install`, `EXPOSE 3000`, and set the default command to `rails server`. - -Then build and run the Docker image. - - docker build -t my-rails-app . 
- docker run --name some-rails-app -d my-rails-app - -Test it by visiting `http://container-ip:3000` in a browser. On the other hand, if you need access outside the host on port 8080: - - docker run --name some-rails-app -p 8080:3000 -d my-rails-app - -Then go to `http://localhost:8080` or `http://host-ip:8080` in a browser. -``` - -For more examples, take a look at these repos: - -* [Go](https://github.com/docker-library/golang) -* [PostgreSQL](https://github.com/docker-library/postgres) -* [Buildpack-deps](https://github.com/docker-library/buildpack-deps) -* ["Hello World" minimal container](https://github.com/docker-library/hello-world) -* [Node](https://github.com/docker-library/node) - -## Submit your repo - -Once you've checked off everything in these guidelines, and are confident your -image is ready for primetime, please contact us at -[partners@docker.com](mailto:partners@docker.com) to have your project -considered for the Official Repos program. +# Official Repositories on Docker Hub + +The Docker [Official Repositories](http://registry.hub.docker.com/official) are +a curated set of Docker repositories that are promoted on Docker Hub and +supported by Docker, Inc. They are designed to: + +* Provide essential base OS repositories (for example, + [`ubuntu`](https://registry.hub.docker.com/_/ubuntu/), + [`centos`](https://registry.hub.docker.com/_/centos/)) that serve as the + starting point for the majority of users. + +* Provide drop-in solutions for popular programming language runtimes, data + stores, and other services, similar to what a Platform-as-a-Service (PAAS) + would offer. + +* Exemplify [`Dockerfile` best practices](/articles/dockerfile_best-practices) + and provide clear documentation to serve as a reference for other `Dockerfile` + authors. + +* Ensure that security updates are applied in a timely manner. This is + particularly important as many Official Repositories are some of the most + popular on Docker Hub. 
+
+* Provide a channel for software vendors to redistribute up-to-date and
+  supported versions of their products. Organization accounts on Docker Hub can
+  also serve this purpose, without the careful review or restrictions on what
+  can be published.
+
+Docker, Inc. sponsors a dedicated team that is responsible for reviewing and
+publishing all Official Repositories content. This team works in collaboration
+with upstream software maintainers, security experts, and the broader Docker
+community.
+
+While it is preferable to have upstream software authors maintaining their
+corresponding Official Repositories, this is not a strict requirement. Creating
+and maintaining images for Official Repositories is a public process. It takes
+place openly on GitHub where participation is encouraged. Anyone can provide
+feedback, contribute code, suggest process changes, or even propose a new
+Official Repository.
+
+## Should I use Official Repositories?
+
+New Docker users are encouraged to use the Official Repositories in their
+projects. These repositories have clear documentation, promote best practices,
+and are designed for the most common use cases. Advanced users are encouraged to
+review the Official Repositories as part of their `Dockerfile` learning process.
+
+A common rationale for diverging from Official Repositories is to optimize for
+image size. For instance, many of the programming language stack images contain
+a complete build toolchain to support installation of modules that depend on
+optimized code. An advanced user could build a custom image with just the
+necessary pre-compiled libraries to save space.
+
+A number of language stacks such as
+[`python`](https://registry.hub.docker.com/_/python/) and
+[`ruby`](https://registry.hub.docker.com/_/ruby/) have `-slim` tag variants
+designed to fill the need for optimization. Even when these "slim" variants are
+insufficient, it is still recommended to inherit from an Official Repository
+base OS image to leverage the ongoing maintenance work, rather than duplicating
+these efforts.
+
+## How can I get involved?
+
+All Official Repositories contain a **User Feedback** section in their
+documentation which covers the details for that specific repository. In most
+cases, the GitHub repository which contains the Dockerfiles for an Official
+Repository also has an active issue tracker. General feedback and support
+questions should be directed to `#docker-library` on Freenode IRC.
+
+## How do I create a new Official Repository?
+
+From a high level, an Official Repository starts out as a proposal in the form
+of a set of GitHub pull requests. You'll find detailed and objective proposal
+requirements in the following GitHub repositories:
+
+* [docker-library/official-images](https://github.com/docker-library/official-images)
+
+* [docker-library/docs](https://github.com/docker-library/docs)
+
+The Official Repositories team, with help from community contributors, formally
+reviews each proposal and provides feedback to the author. This initial review
+process may require a bit of back and forth before the proposal is accepted.
+
+There are also subjective considerations during the review process. These
+subjective concerns boil down to the basic question: "is this image generally
+useful?" For example, the [`python`](https://registry.hub.docker.com/_/python/)
+Official Repository is "generally useful" to the large Python developer
+community, whereas an obscure text adventure game written in Python last week is
+not.
+
+When a new proposal is accepted, the author becomes responsible for keeping
+their images up-to-date and responding to user feedback. The Official
+Repositories team becomes responsible for publishing the images and
+documentation on Docker Hub. 
Updates to the Official Repository follow the same +pull request process, though with less review. The Official Repositories team +ultimately acts as a gatekeeper for all changes, which helps mitigate the risk +of quality and security issues from being introduced. + +> **Note**: If you are interested in proposing an Official Repository, but would +> like to discuss it with Docker, Inc. privately first, please send your +> inquiries to partners@docker.com. There is no fast-track or pay-for-status +> option. diff --git a/docs/sources/docker-hub/repos.md b/docs/sources/docker-hub/repos.md index 35cd4f8ccbeb5..a48040fb55806 100644 --- a/docs/sources/docker-hub/repos.md +++ b/docs/sources/docker-hub/repos.md @@ -1,8 +1,8 @@ -page_title: Repositories and Images on Docker Hub -page_description: Repositories and Images on Docker Hub +page_title: Repositories and images on Docker Hub +page_description: Repositories and images on Docker Hub page_keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, webhooks, docs, documentation -# Repositories and Images on Docker Hub +# Repositories and images on Docker Hub ![repositories](/docker-hub/hub-images/repos.png) @@ -53,8 +53,8 @@ You can read more about that [here](accounts/). ## Official Repositories -The Docker Hub contains a number of [official -repositories](http://registry.hub.docker.com/official). These are +The Docker Hub contains a number of [Official +Repositories](http://registry.hub.docker.com/official). These are certified repositories from vendors and contributors to Docker. They contain Docker images from vendors like Canonical, Oracle, and Red Hat that you can use to build applications and services. @@ -63,11 +63,11 @@ If you use Official Repositories you know you're using a supported, optimized and up-to-date image to power your applications. 
> **Note:** -> If you would like to contribute an official repository for your -> organization, product or team you can see more information -> [here](https://github.com/docker/stackbrew). +> If you would like to contribute an Official Repository for your +> organization, see [Official Repositories on Docker +> Hub](/docker-hub/official_repos) for more information. -## Private Repositories +## Private repositories Private repositories allow you to have repositories that contain images that you want to keep private, either to your own account or within an diff --git a/docs/sources/examples.md b/docs/sources/examples.md index 9dcd67a643343..f4d5b868ef699 100644 --- a/docs/sources/examples.md +++ b/docs/sources/examples.md @@ -1,9 +1,9 @@ # Examples - - [Dockerizing a Node.js Web App](nodejs_web_app/) - - [Dockerizing a Redis Service](running_redis_service/) - - [Dockerizing an SSH Daemon Service](running_ssh_service/) - - [Dockerizing a CouchDB Service](couchdb_data_volumes/) - - [Dockerizing a PostgreSQL Service](postgresql_service/) + - [Dockerizing a Node.js web app](nodejs_web_app/) + - [Dockerizing a Redis service](running_redis_service/) + - [Dockerizing an SSH daemon service](running_ssh_service/) + - [Dockerizing a CouchDB service](couchdb_data_volumes/) + - [Dockerizing a PostgreSQL service](postgresql_service/) - [Dockerizing MongoDB](mongodb/) - - [Dockerizing a Riak Service](running_riak_service/) + - [Dockerizing a Riak service](running_riak_service/) diff --git a/docs/sources/examples/apt-cacher-ng.md b/docs/sources/examples/apt-cacher-ng.md index 9a3631220ec4c..57aa669666eff 100644 --- a/docs/sources/examples/apt-cacher-ng.md +++ b/docs/sources/examples/apt-cacher-ng.md @@ -2,7 +2,7 @@ page_title: Dockerizing an apt-cacher-ng service page_description: Installing and running an apt-cacher-ng service page_keywords: docker, example, package installation, networking, debian, ubuntu -# Dockerizing an Apt-Cacher-ng Service +# Dockerizing an apt-cacher-ng 
service > **Note**: > - **If you don't like sudo** then see [*Giving non-root diff --git a/docs/sources/examples/couchdb_data_volumes.md b/docs/sources/examples/couchdb_data_volumes.md index 483168ae21178..27bce34a95b7e 100644 --- a/docs/sources/examples/couchdb_data_volumes.md +++ b/docs/sources/examples/couchdb_data_volumes.md @@ -1,8 +1,8 @@ -page_title: Dockerizing a CouchDB Service +page_title: Dockerizing a CouchDB service page_description: Sharing data between 2 couchdb databases page_keywords: docker, example, package installation, networking, couchdb, data volumes -# Dockerizing a CouchDB Service +# Dockerizing a CouchDB service > **Note**: > - **If you don't like sudo** then see [*Giving non-root diff --git a/docs/sources/examples/nodejs_web_app.md b/docs/sources/examples/nodejs_web_app.md index 1db61ae624ca4..ff7179a811496 100644 --- a/docs/sources/examples/nodejs_web_app.md +++ b/docs/sources/examples/nodejs_web_app.md @@ -1,8 +1,8 @@ -page_title: Dockerizing a Node.js Web App +page_title: Dockerizing a Node.js web app page_description: Installing and running a Node.js app with Docker page_keywords: docker, example, package installation, node, centos -# Dockerizing a Node.js Web App +# Dockerizing a Node.js web app > **Note**: > - **If you don't like sudo** then see [*Giving non-root diff --git a/docs/sources/examples/running_redis_service.md b/docs/sources/examples/running_redis_service.md index a00db9896420f..c46bb09c770f7 100644 --- a/docs/sources/examples/running_redis_service.md +++ b/docs/sources/examples/running_redis_service.md @@ -2,12 +2,12 @@ page_title: Dockerizing a Redis service page_description: Installing and running an redis service page_keywords: docker, example, package installation, networking, redis -# Dockerizing a Redis Service +# Dockerizing a Redis service Very simple, no frills, Redis service attached to a web application using a link. 
-## Create a docker container for Redis +## Create a Docker container for Redis Firstly, we create a `Dockerfile` for our new Redis image. diff --git a/docs/sources/examples/running_riak_service.md b/docs/sources/examples/running_riak_service.md index 6d49cc87eb04b..1b14c3a417b31 100644 --- a/docs/sources/examples/running_riak_service.md +++ b/docs/sources/examples/running_riak_service.md @@ -2,7 +2,7 @@ page_title: Dockerizing a Riak service page_description: Build a Docker image with Riak pre-installed page_keywords: docker, example, package installation, networking, riak -# Dockerizing a Riak Service +# Dockerizing a Riak service The goal of this example is to show you how to build a Docker image with Riak pre-installed. diff --git a/docs/sources/examples/running_ssh_service.md b/docs/sources/examples/running_ssh_service.md index e2fc3782d5ca2..b1000a04acee0 100644 --- a/docs/sources/examples/running_ssh_service.md +++ b/docs/sources/examples/running_ssh_service.md @@ -2,7 +2,7 @@ page_title: Dockerizing an SSH service page_description: Installing and running an SSHd service on Docker page_keywords: docker, example, package installation, networking -# Dockerizing an SSH Daemon Service +# Dockerizing an SSH daemon service ## Build an `eg_sshd` image diff --git a/docs/sources/http-routingtable.md b/docs/sources/http-routingtable.md index 07029d2ca8d83..14e1dfcd2e30d 100644 --- a/docs/sources/http-routingtable.md +++ b/docs/sources/http-routingtable.md @@ -1,4 +1,4 @@ -# HTTP Routing Table +# HTTP routing table [**/api**](#cap-/api) | [**/auth**](#cap-/auth) | [**/build**](#cap-/build) | [**/commit**](#cap-/commit) | diff --git a/docs/sources/index.md b/docs/sources/index.md index 993603eb33cc8..ef827acba1a3d 100644 --- a/docs/sources/index.md +++ b/docs/sources/index.md @@ -75,18 +75,18 @@ The [Understanding Docker section](introduction/understanding-docker.md) will he - See how Docker compares to virtual machines - See some common use cases. 
-### Installation Guides +### Installation guides The [installation section](/installation/#installation) will show you how to install Docker on a variety of platforms. -### Docker User Guide +### Docker user guide To learn about Docker in more detail and to answer questions about usage and implementation, check out the [Docker User Guide](/userguide/). -## Release Notes +## Release notes A summary of the changes in each release in the current series can now be found on the separate [Release Notes page](/release-notes/) diff --git a/docs/sources/installation/SUSE.md b/docs/sources/installation/SUSE.md index 2a0aa91d9f331..756ed6b5c1430 100644 --- a/docs/sources/installation/SUSE.md +++ b/docs/sources/installation/SUSE.md @@ -8,7 +8,7 @@ Docker is available in **openSUSE 12.3 and later**. Please note that due to its current limitations Docker is able to run only **64 bit** architecture. Docker is not part of the official repositories of openSUSE 12.3 and -openSUSE 13.1. Hence it is neccessary to add the [Virtualization +openSUSE 13.1. Hence it is necessary to add the [Virtualization repository](https://build.opensuse.org/project/show/Virtualization) from [OBS](https://build.opensuse.org/) to install the `docker` package. diff --git a/docs/sources/installation/amazon.md b/docs/sources/installation/amazon.md index 6a28685dc57c9..3fdeb7228acb4 100644 --- a/docs/sources/installation/amazon.md +++ b/docs/sources/installation/amazon.md @@ -2,48 +2,14 @@ page_title: Installation on Amazon EC2 page_description: Installation instructions for Docker on Amazon EC2. page_keywords: amazon ec2, virtualization, cloud, docker, documentation, installation -# Amazon EC2 +## Amazon EC2 -There are several ways to install Docker on AWS EC2. You can use Amazon Linux, which includes the Docker packages in its Software Repository, or opt for any of the other supported Linux images, for example a [*Standard Ubuntu Installation*](#standard-ubuntu-installation). 
+You can install Docker on any AWS EC2 Amazon Machine Image (AMI) which runs an +operating system that Docker supports. Amazon's website includes specific +instructions for [installing on Amazon +Linux](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html#install_docker). To install on +another AMI, follow the instructions for its specific operating +system in this installation guide. -**You'll need an** [AWS account](http://aws.amazon.com/) **first, of -course.** - -## Amazon QuickStart with Amazon Linux AMI 2014.09.1 - -The latest Amazon Linux AMI, 2014.09.1, is Docker ready. Docker packages can be installed from Amazon's provided Software -Repository. - -1. **Choose an image:** - - Launch the [Create Instance - Wizard](https://console.aws.amazon.com/ec2/v2/home?#LaunchInstanceWizard:) - menu on your AWS Console. - - In the Quick Start menu, select the Amazon provided AMI for Amazon Linux 2014.09.1 - - For testing you can use the default (possibly free) - `t2.micro` instance (more info on - [pricing](http://aws.amazon.com/ec2/pricing/)). - - Click the `Next: Configure Instance Details` - button at the bottom right. -2. After a few more standard choices where defaults are probably ok, - your Amazon Linux instance should be running! -3. SSH to your instance to install Docker : - `ssh -i ec2-user@` -4. Once connected to the instance, type - `sudo yum install -y docker ; sudo service docker start` - to install and start Docker - -**If this is your first AWS instance, you may need to set up your Security Group to allow SSH.** By default all incoming ports to your new instance will be blocked by the AWS Security Group, so you might just get timeouts when you try to connect. - -Once you`ve got Docker installed, you're ready to try it out – head on -over to the [User Guide](/userguide). 
- -## Standard Ubuntu Installation - -If you want a more hands-on installation, then you can follow the -[*Ubuntu*](/installation/ubuntulinux) instructions installing Docker -on any EC2 instance running Ubuntu. Just follow Step 1 from the Amazon -QuickStart above to pick an image (or use one of your -own) and skip the step with the *User Data*. Then continue with the -[*Ubuntu*](/installation/ubuntulinux) instructions. - -Continue with the [User Guide](/userguide/). +For detailed information on Amazon AWS support for Docker, refer to [Amazon's +documentation](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/docker-basics.html). diff --git a/docs/sources/installation/azure.md b/docs/sources/installation/azure.md index a8e700fead838..54910228ec77b 100644 --- a/docs/sources/installation/azure.md +++ b/docs/sources/installation/azure.md @@ -1,4 +1,4 @@ -page_title: Installation on Microsoft Azure Platform +page_title: Installation on Microsoft Azure platform page_description: Instructions for creating a Docker-ready virtual machine on Microsoft Azure cloud platform. page_keywords: Docker, Docker documentation, installation, azure, microsoft diff --git a/docs/sources/installation/binaries.md b/docs/sources/installation/binaries.md index ef9f5cafa2271..a9a96bec0d9f0 100644 --- a/docs/sources/installation/binaries.md +++ b/docs/sources/installation/binaries.md @@ -1,4 +1,4 @@ -page_title: Installation from Binaries +page_title: Installation from binaries page_description: Instructions for installing Docker as a binary. Mostly meant for hackers who want to try out Docker on a variety of environments. page_keywords: binaries, installation, docker, documentation, linux @@ -78,18 +78,93 @@ exhibit unexpected behaviour. > vendor for the system, and might break regulations and security > policies in heavily regulated environments. -## Get the docker binary: +## Get the Docker binary + +You can download either the latest release binary or a specific version. 
+After downloading a binary file, you must set the file's execute bit to run it. + +To set the file's execute bit on Linux and OS X: - $ wget https://get.docker.com/builds/Linux/x86_64/docker-latest -O docker $ chmod +x docker -> **Note**: -> If you have trouble downloading the binary, you can also get the smaller -> compressed release file: -> [https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz]( -> https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz) +To get the list of stable release version numbers from Github, view the +`docker/docker` [releases page](https://github.com/docker/docker/releases). + +> **Note** +> +> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively +> +> 2) You can get the compressed binaries by appending .tgz to the URLs + +### Get the Linux binary + +To download the latest version for Linux, use the +following URLs: + + https://get.docker.com/builds/Linux/i386/docker-latest + + https://get.docker.com/builds/Linux/x86_64/docker-latest + +To download a specific version for Linux, use the +following URL patterns: + + https://get.docker.com/builds/Linux/i386/docker- + + https://get.docker.com/builds/Linux/x86_64/docker- + +For example: + + https://get.docker.com/builds/Linux/i386/docker-1.6.0 + + https://get.docker.com/builds/Linux/x86_64/docker-1.6.0 + + +### Get the Mac OS X binary + +The Mac OS X binary is only a client. You cannot use it to run the `docker` +daemon. 
To download the latest version for Mac OS X, use the following URLs: + + https://get.docker.com/builds/Darwin/i386/docker-latest + + https://get.docker.com/builds/Darwin/x86_64/docker-latest + +To download a specific version for Mac OS X, use the +following URL patterns: + + https://get.docker.com/builds/Darwin/i386/docker- + + https://get.docker.com/builds/Darwin/x86_64/docker- + +For example: + + https://get.docker.com/builds/Darwin/i386/docker-1.6.0 + + https://get.docker.com/builds/Darwin/x86_64/docker-1.6.0 + +### Get the Windows binary + +You can only download the Windows client binary for version `1.6.0` onwards. +Moreover, the binary is only a client, you cannot use it to run the `docker` daemon. +To download the latest version for Windows, use the following URLs: + + https://get.docker.com/builds/Windows/i386/docker-latest.exe + + https://get.docker.com/builds/Windows/x86_64/docker-latest.exe + +To download a specific version for Windows, use the following URL pattern: + + https://get.docker.com/builds/Windows/i386/docker-.exe + + https://get.docker.com/builds/Windows/x86_64/docker-.exe + +For example: + + https://get.docker.com/builds/Windows/i386/docker-1.6.0.exe + + https://get.docker.com/builds/Windows/x86_64/docker-1.6.0.exe + -## Run the docker daemon +## Run the Docker daemon # start the docker in daemon mode from the directory you unpacked $ sudo ./docker -d & diff --git a/docs/sources/installation/centos.md b/docs/sources/installation/centos.md index 862d508988d4a..7868f11b05727 100644 --- a/docs/sources/installation/centos.md +++ b/docs/sources/installation/centos.md @@ -33,17 +33,6 @@ run the following command: Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon). -### FirewallD - -CentOS-7 introduced firewalld, which is a wrapper around iptables and can -conflict with Docker. - -When `firewalld` is started or restarted it will remove the `DOCKER` chain -from iptables, preventing Docker from working properly. 
- -When using Systemd, `firewalld` is started before Docker, but if you -start or restart `firewalld` after Docker, you will have to restart the Docker daemon. - ## Installing Docker - CentOS-6.5 For CentOS-6.5, the Docker package is part of [Extra Packages diff --git a/docs/sources/installation/cruxlinux.md b/docs/sources/installation/cruxlinux.md index ead4c273caf12..d474aa52f873a 100644 --- a/docs/sources/installation/cruxlinux.md +++ b/docs/sources/installation/cruxlinux.md @@ -20,7 +20,7 @@ Assuming you have contrib enabled, update your ports tree and install docker (*a # prt-get depinst docker -## Kernel Requirements +## Kernel requirements To have a working **CRUX+Docker** Host you must ensure your Kernel has the necessary modules enabled for the Docker Daemon to function correctly. diff --git a/docs/sources/installation/debian.md b/docs/sources/installation/debian.md index 709a44d41c34e..da9e5f59b132b 100644 --- a/docs/sources/installation/debian.md +++ b/docs/sources/installation/debian.md @@ -11,8 +11,7 @@ Docker is supported on the following versions of Debian: ## Debian Jessie 8.0 (64-bit) -Debian 8 comes with a 3.14.0 Linux kernel, and a `docker.io` package which -installs all its prerequisites from Debian's repository. +Debian 8 comes with a 3.16.0 Linux kernel, the `docker.io` package can be found in the `jessie-backports` repository. Reasoning behind this can be found here. Instructions how to enable the backports repository can be found here. > **Note**: > Debian contains a much older KDE3/GNOME2 package called ``docker``, so the @@ -20,6 +19,8 @@ installs all its prerequisites from Debian's repository. ### Installation +Make sure you enabled the `jessie-backports` repository, as stated above. 
+ To install the latest Debian package (may not be the latest Docker release): $ sudo apt-get update @@ -27,9 +28,10 @@ To install the latest Debian package (may not be the latest Docker release): To verify that everything has worked as expected: - $ sudo docker run -i -t ubuntu /bin/bash + $ sudo docker run --rm hello-world -Which should download the `ubuntu` image, and then start `bash` in a container. +This command downloads and runs the `hello-world` image in a container. When the +container runs, it prints an informational message. Then, it exits. > **Note**: > If you want to enable memory and swap accounting see @@ -39,7 +41,7 @@ Which should download the `ubuntu` image, and then start `bash` in a container. Docker requires Kernel 3.8+, while Wheezy ships with Kernel 3.2 (for more details on why 3.8 is required, see discussion on -[bug #407](https://github.com/docker/docker/issues/407%20kernel%20versions)). +[bug #407](https://github.com/docker/docker/issues/407)). Fortunately, wheezy-backports currently has [Kernel 3.16 ](https://packages.debian.org/search?suite=wheezy-backports§ion=all&arch=any&searchon=names&keywords=linux-image-amd64), diff --git a/docs/sources/installation/mac.md b/docs/sources/installation/mac.md index 9326f5fc47a25..4b157c1682c60 100644 --- a/docs/sources/installation/mac.md +++ b/docs/sources/installation/mac.md @@ -162,7 +162,7 @@ Initialize and run `boot2docker` from the command line, do the following: $ docker run hello-world -## Basic Boot2Docker Exercises +## Basic Boot2Docker exercises At this point, you should have `boot2docker` running and the `docker` client environment initialized. To verify this, run the following commands: @@ -314,7 +314,7 @@ section. The installer places Boot2Docker in your "Applications" folder. -## Learning more and Acknowledgement +## Learning more and acknowledgement Use `boot2docker help` to list the full command line reference. 
For more diff --git a/docs/sources/installation/oracle.md b/docs/sources/installation/oracle.md index 6d2f782b49264..e05e664c120dc 100644 --- a/docs/sources/installation/oracle.md +++ b/docs/sources/installation/oracle.md @@ -110,7 +110,7 @@ service. On Oracle Linux 7, you can use a `systemd.mount` definition and modify the Docker `systemd.service` to depend on the btrfs mount defined in systemd. -### SElinux Support on Oracle Linux 7 +### SElinux support on Oracle Linux 7 SElinux must be set to `Permissive` or `Disabled` in `/etc/sysconfig/selinux` to use the btrfs storage engine on Oracle Linux 7. diff --git a/docs/sources/installation/rhel.md b/docs/sources/installation/rhel.md index 58b2316c6f3cb..b3bd7aa1d0852 100644 --- a/docs/sources/installation/rhel.md +++ b/docs/sources/installation/rhel.md @@ -7,7 +7,7 @@ page_keywords: Docker, Docker documentation, requirements, linux, rhel Docker is supported on the following versions of RHEL: - [*Red Hat Enterprise Linux 7 (64-bit)*](#red-hat-enterprise-linux-7-installation) -- [*Red Hat Enterprise Linux 6.5 (64-bit)*](#red-hat-enterprise-linux-6.5-installation) or later +- [*Red Hat Enterprise Linux 6.6 (64-bit)*](#red-hat-enterprise-linux-66-installation) or later ## Kernel support @@ -16,7 +16,7 @@ running on kernels shipped by the distribution. There are kernel changes which will cause issues if one decides to step outside that box and run non-distribution kernel packages. -## Red Hat Enterprise Linux 7 Installation +## Red Hat Enterprise Linux 7 installation **Red Hat Enterprise Linux 7 (64 bit)** has [shipped with Docker](https://access.redhat.com/site/products/red-hat-enterprise-linux/docker-and-containers). @@ -41,14 +41,14 @@ Portal](https://access.redhat.com/). Please continue with the [Starting the Docker daemon](#starting-the-docker-daemon). 
-## Red Hat Enterprise Linux 6.5 Installation +## Red Hat Enterprise Linux 6.6 installation You will need **64 bit** [RHEL -6.5](https://access.redhat.com/site/articles/3078#RHEL6) or later, with -a RHEL 6 kernel version 2.6.32-431 or higher as this has specific kernel -fixes to allow Docker to work. +6.6](https://access.redhat.com/site/articles/3078#RHEL6) or later, with +a RHEL 6 kernel version 2.6.32-504.16.2 or higher as this has specific kernel +fixes to allow Docker to work. Related issues: [#9856](https://github.com/docker/docker/issues/9856). -Docker is available for **RHEL6.5** on EPEL. Please note that +Docker is available for **RHEL6.6** on EPEL. Please note that this package is part of [Extra Packages for Enterprise Linux (EPEL)](https://fedoraproject.org/wiki/EPEL), a community effort to create and maintain additional packages for the RHEL distribution. diff --git a/docs/sources/installation/ubuntulinux.md b/docs/sources/installation/ubuntulinux.md index 6400fdb59a3f3..75b3c9fb68337 100644 --- a/docs/sources/installation/ubuntulinux.md +++ b/docs/sources/installation/ubuntulinux.md @@ -94,7 +94,7 @@ prerequisite installed, Docker's installation process adds it. ##Installing Docker on Ubuntu -Make sure you have intalled the prerequisites for your Ubuntu version. Then, +Make sure you have installed the prerequisites for your Ubuntu version. Then, install Docker using the following: 1. Log into your Ubuntu installation as a user with `sudo` privileges. @@ -127,7 +127,7 @@ install Docker using the following: This command downloads a test image and runs it in a container. -## Optional Configurations for Docker on Ubuntu +## Optional configurations for Docker on Ubuntu This section contains optional procedures for configuring your Ubuntu to work better with Docker. @@ -137,7 +137,7 @@ better with Docker. 
* [Enable UFW forwarding](#enable-ufw-forwarding) * [Configure a DNS server for use by Docker](#configure-a-dns-server-for-docker) -### Create a docker group +### Create a Docker group The `docker` daemon binds to a Unix socket instead of a TCP port. By default that Unix socket is owned by the user `root` and other users can access it with @@ -253,7 +253,7 @@ The warning occurs because Docker containers can't use the local DNS nameserver. Instead, Docker defaults to using an external nameserver. To avoid this warning, you can specify a DNS server for use by Docker -containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabiling +containers. Or, you can disable `dnsmasq` in NetworkManager. Though, disabling `dnsmasq` might make DNS resolution slower on some networks. To specify a DNS server for use by Docker: diff --git a/docs/sources/installation/windows.md b/docs/sources/installation/windows.md index a1bd1de1d2c88..fd3cc7eb4a40a 100644 --- a/docs/sources/installation/windows.md +++ b/docs/sources/installation/windows.md @@ -30,7 +30,7 @@ is developed, you can launch only Linux containers from your Windows machine. 1. Download the latest release of the [Docker for Windows Installer](https://github.com/boot2docker/windows-installer/releases/latest). -2. Run the installer, which will install Docker Client or Windows, VirtualBox, +2. Run the installer, which will install Docker Client for Windows, VirtualBox, Git for Windows (MSYS-git), the boot2docker Linux ISO, and the Boot2Docker management tool. ![](/installation/images/windows-installer.png) @@ -59,7 +59,7 @@ Let's try the `hello-world` example image. Run This should download the very small `hello-world` image and print a `Hello from Docker.` message. -## Using docker from Windows Command Line Prompt (cmd.exe) +## Using Docker from Windows Command Line Prompt (cmd.exe) Launch a Windows Command Line Prompt (cmd.exe). 
@@ -77,7 +77,7 @@ to your console window and you are ready to run docker commands such as ![](/installation/images/windows-boot2docker-cmd.png) -## Using docker from PowerShell +## Using Docker from PowerShell Launch a PowerShell window, then you need to add `ssh.exe` to your PATH: diff --git a/docs/sources/introduction/understanding-docker.md b/docs/sources/introduction/understanding-docker.md index 263690217368a..060428ecc92a7 100644 --- a/docs/sources/introduction/understanding-docker.md +++ b/docs/sources/introduction/understanding-docker.md @@ -109,7 +109,7 @@ Docker containers. Docker provides a simple way to build new images or update ex images, or you can download Docker images that other people have already created. Docker images are the **build** component of Docker. -#### Docker Registries +#### Docker registries Docker registries hold images. These are public or private stores from which you upload or download images. The public Docker registry is called [Docker Hub](http://hub.docker.com). It provides a huge collection of existing @@ -135,7 +135,7 @@ So far, we've learned that: Let's look at how these elements combine together to make Docker work. -### How does a Docker Image work? +### How does a Docker image work? We've already seen that Docker images are read-only templates from which Docker containers are launched. Each image consists of a series of layers. Docker makes use of [union file systems](http://en.wikipedia.org/wiki/UnionFS) to @@ -280,7 +280,7 @@ BSD Jails or Solaris Zones. ### Installing Docker Visit the [installation section](/installation/#installation). -### The Docker User Guide +### The Docker user guide [Learn Docker in depth](/userguide/). 
diff --git a/docs/sources/project/advanced-contributing.md b/docs/sources/project/advanced-contributing.md index 0c9b5d1ce8811..7ee7a86cbe5d8 100644 --- a/docs/sources/project/advanced-contributing.md +++ b/docs/sources/project/advanced-contributing.md @@ -67,7 +67,7 @@ The following provides greater detail on the process: The design proposals are all online in our GitHub pull requests. + 3Akind%2Fproposal" target="_blank">all online in our GitHub pull requests. 3. Talk to the community about your idea. @@ -89,7 +89,7 @@ The following provides greater detail on the process: This is a Markdown file that describes your idea. Your proposal should include information like: - * Why is this changed needed or what are the use cases? + * Why is this change needed or what are the use cases? * What are the requirements this change should meet? * What are some ways to design/implement this feature? * Which design/implementation do you think is best and why? @@ -137,7 +137,7 @@ The following provides greater detail on the process: 14. Acceptance and merge! -## About the Advanced process +## About the advanced process Docker is a large project. Our core team gets a great many design proposals. Design proposal discussions can span days, weeks, and longer. The number of comments can reach the 100s. 
diff --git a/docs/sources/project/coding-style.md b/docs/sources/project/coding-style.md index e5b6f5fe9cfb1..57f6389365fdb 100644 --- a/docs/sources/project/coding-style.md +++ b/docs/sources/project/coding-style.md @@ -1,13 +1,13 @@ -page_title: Coding Style Checklist +page_title: Coding style checklist page_description: List of guidelines for coding Docker contributions page_keywords: change, commit, squash, request, pull request, test, unit test, integration tests, Go, gofmt, LGTM -# Coding Style Checklist +# Coding style checklist This checklist summarizes the material you experienced working through [make a code contribution](/project/make-a-contribution) and [advanced -contributing](/project/advanced-contributing). The checklist applies to code -that is program code or code that is documentation code. +contributing](/project/advanced-contributing). The checklist applies to both +program code and documentation code. ## Change and commit code diff --git a/docs/sources/project/create-pr.md b/docs/sources/project/create-pr.md index 197aee849d0bf..613ab691123d1 100644 --- a/docs/sources/project/create-pr.md +++ b/docs/sources/project/create-pr.md @@ -11,7 +11,7 @@ repository into the `docker/docker` repository. You can see the list of active pull requests to Docker on GitHub. -## Check Your Work +## Check your work Before you create a pull request, check your work. @@ -22,7 +22,7 @@ Before you create a pull request, check your work. 2. Checkout your feature branch. $ git checkout 11038-fix-rhel-link - Already on '11038-fix-rhel-link' + Switched to branch '11038-fix-rhel-link' 3. Run the full test suite on your branch. @@ -41,7 +41,11 @@ Before you create a pull request, check your work. Always rebase and squash your commits before making a pull request. -1. Fetch any of the last minute changes from `docker/docker`. +1. Checkout your feature branch in your local `docker-fork` repository. + + This is the branch associated with your request. + +2. 
Fetch any last minute changes from `docker/docker`. $ git fetch upstream master From github.com:docker/docker @@ -56,28 +60,28 @@ Always rebase and squash your commits before making a pull request. pick 1a79f55 Tweak some of the other text for grammar pick 53e4983 Fix a link pick 3ce07bb Add a new line about RHEL - - If you run into trouble, `git --rebase abort` removes any changes and gets - you back to where you started. -4. Squash the `pick` keyword with `squash` on all but the first commit. +5. Replace the `pick` keyword with `squash` on all but the first commit. pick 1a79f55 Tweak some of the other text for grammar squash 53e4983 Fix a link squash 3ce07bb Add a new line about RHEL - After closing the file, `git` opens your editor again to edit the commit - message. + After you save the changes and quit from the editor, git starts + the rebase, reporting the progress along the way. Sometimes + your changes can conflict with the work of others. If git + encounters a conflict, it stops the rebase, and prints guidance + for how to correct the conflict. -5. Edit and save your commit message. +6. Edit and save your commit message. `git commit -s` - Make sure your message includes + +3. Commit the change. $ git commit --amend Git opens an editor containing your last commit message. -3. Adjust your last comment to reflect this new change. +4. Adjust your last comment to reflect this new change. Added a new sentence per Anaud's suggestion @@ -72,15 +80,17 @@ To update your existing pull request: # modified: docs/sources/installation/mac.md # modified: docs/sources/installation/rhel.md -4. Push to your origin. +5. Force push the change to your origin. + + The command syntax is: - $ git push origin + git push -f origin -5. Open your browser to your pull request on GitHub. +6. Open your browser to your pull request on GitHub. You should see your pull request now contains your newly pushed code. -6. Add a comment to your pull request. +7. 
Add a comment to your pull request. GitHub only notifies PR participants when you comment. For example, you can mention that you updated your PR. Your comment alerts the maintainers that diff --git a/docs/sources/project/set-up-dev-env.md b/docs/sources/project/set-up-dev-env.md index 80d4f335b3a46..60a59b6155f91 100644 --- a/docs/sources/project/set-up-dev-env.md +++ b/docs/sources/project/set-up-dev-env.md @@ -209,7 +209,7 @@ build and run a `docker` binary in your container. root@5f8630b873fe:/go/src/github.com/docker/docker# The command creates a container from your `dry-run-test` image. It opens an - interactive terminal (`-ti`) running a `/bin/bash shell`. The + interactive terminal (`-ti`) running a `/bin/bash` shell. The `--privileged` flag gives the container access to kernel features and device access. This flag allows you to run a container in a container. Finally, the `-rm` flag instructs Docker to remove the container when you diff --git a/docs/sources/project/set-up-git.md b/docs/sources/project/set-up-git.md index 677722c6a0aec..d67ff817c6144 100644 --- a/docs/sources/project/set-up-git.md +++ b/docs/sources/project/set-up-git.md @@ -46,9 +46,12 @@ target="_blank">docker/docker repository. that instead. You'll need to convert what you see in the guide to what is appropriate to your tool. -5. Open a terminal window on your local host and change to your home directory. In Windows, you'll work in your Boot2Docker window instead of Powershell or cmd. +5. Open a terminal window on your local host and change to your home directory. $ cd ~ + + In Windows, you'll work in your Boot2Docker window instead of Powershell or + a `cmd` window. 6. Create a `repos` directory. @@ -88,7 +91,7 @@ contributions through pseudonyms. As you change code in your fork, you'll want to keep it in sync with the changes others make in the `docker/docker` repository. To make syncing easier, you'll also add a _remote_ called `upstream` that points to `docker/docker`. 
A remote -is just another a project version hosted on the internet or network. +is just another project version hosted on the internet or network. To configure your username, email, and add a remote: @@ -171,7 +174,7 @@ the branch to your fork on GitHub: You can use any text editor you are comfortable with. -6. Close and save the file. +6. Save and close the file. 7. Check the status of your branch. diff --git a/docs/sources/project/software-req-win.md b/docs/sources/project/software-req-win.md new file mode 100644 index 0000000000000..a7f1378929ec3 --- /dev/null +++ b/docs/sources/project/software-req-win.md @@ -0,0 +1,258 @@ +page_title: Set up for development on Windows +page_description: How to set up a server to test Docker Windows client +page_keywords: development, inception, container, image Dockerfile, dependencies, Go, artifacts, windows + + +# Get the required software for Windows + +This page explains how to get the software you need to use a a Windows Server +2012 or Windows 8 machine for Docker development. Before you begin contributing +you must have: + +- a GitHub account +- Git for Windows (msysGit) +- TDM-GCC, a compiler suite for Windows +- MinGW (tar and xz) +- Go language + +> **Note**: This installation prcedure refers to the `C:\` drive. If you system's main drive +is `D:\` you'll need to substitute that in where appropriate in these +instructions. + +### Get a GitHub account + +To contribute to the Docker project, you will need a GitHub account. A free account is +fine. All the Docker project repositories are public and visible to everyone. + +You should also have some experience using both the GitHub application and `git` +on the command line. + +## Install Git for Windows + +Git for Windows includes several tools including msysGit, which is a build +environment. The environment contains the tools you need for development such as +Git and a Git Bash shell. + +1. Browse to the [Git for Windows](https://msysgit.github.io/) download page. 
+ +2. Click **Download**. + + Windows prompts you to save the file to your machine. + +3. Run the saved file. + + The system displays the **Git Setup** wizard. + +4. Click the **Next** button to move through the wizard and accept all the defaults. + +5. Click **Finish** when you are done. + +## Installing TDM-GCC + +TDM-GCC is a compiler suite for Windows. You'll use this suite to compile the +Docker Go code as you develop. + +1. Browse to + [tdm-gcc download page](http://tdm-gcc.tdragon.net/download). + +2. Click on the lastest 64-bit version of the package. + + Windows prompts you to save the file to your machine + +3. Set up the suite by running the downloaded file. + + The system opens the **TDM-GCC Setup** wizard. + +4. Click **Create**. + +5. Click the **Next** button to move through the wizard and accept all the defaults. + +6. Click **Finish** when you are done. + + +## Installing MinGW (tar and xz) + +MinGW is a minimalist port of the GNU Compiler Collection (GCC). In this +procedure, you first download and install the MinGW installation manager. Then, +you use the manager to install the `tar` and `xz` tools from the collection. + +1. Browse to MinGW + [SourceForge](http://sourceforge.net/projects/mingw/). + +2. Click **Download**. + + Windows prompts you to save the file to your machine + +3. Run the downloaded file. + + The system opens the **MinGW Installation Manager Setup Tool** + +4. Choose **Install** install the MinGW Installation Manager. + +5. Press **Continue**. + + The system installs and then opens the MinGW Installation Manager. + +6. Press **Continue** after the install completes to open the manager. + +7. Select **All Packages > MSYS Base System** from the left hand menu. + + The system displays the available packages. + +8. Click on the the **msys-tar bin** package and choose **Mark for Installation**. + +9. Click on the **msys-xz bin** package and choose **Mark for Installation**. + +10. 
Select **Installation > Apply Changes**, to install the selected packages. + + The system displays the **Schedule of Pending Actions Dialog**. + + ![windows-mingw](/project/images/windows-mingw.png) + +11. Press **Apply** + + MingGW installs the packages for you. + +12. Close the dialog and the MinGW Installation Manager. + + +## Set up your environment variables + +You'll need to add the compiler to your `Path` environment variable. + +1. Open the **Control Panel**. + +2. Choose **System and Security > System**. + +3. Click the **Advanced system settings** link in the sidebar. + + The system opens the **System Properties** dialog. + +3. Select the **Advanced** tab. + +4. Click **Environment Variables**. + + The system opens the **Environment Variables dialog** dialog. + +5. Locate the **System variables** area and scroll to the **Path** + variable. + + ![windows-mingw](/project/images/path_variable.png) + +6. Click **Edit** to edit the variable (you can also double-click it). + + The system opens the **Edit System Variable** dialog. + +7. Make sure the `Path` includes `C:\TDM-GCC64\bin` + + ![include gcc](/project/images/include_gcc.png) + + If you don't see `C:\TDM-GCC64\bin`, add it. + +8. Press **OK** to close this dialog. + +9. Press **OK** twice to close out of the remaining dialogs. + +## Install Go and cross-compile it + +In this section, you install the Go language. Then, you build the source so that it can cross-compile for `linux/amd64` architectures. + +1. Open [Go Language download](http://golang.org/dl/) page in your browser. + +2. Locate and click the latest `.msi` installer. + + The system prompts you to save the file. + +3. Run the installer. + + The system opens the **Go Programming Langauge Setup** dialog. + +4. Select all the defaults to install. + +5. Press **Finish** to close the installation dialog. + +6. Start a command prompt. + +7. Change to the Go `src` directory. + + cd c:\Go\src + +8. 
Set the following Go variables + + c:\Go\src> set GOOS=linux + c:\Go\src> set GOARCH=amd64 + +9. Compile the source. + + c:\Go\src> make.bat + + Compiling the source also adds a number of variables to your Windows environment. + +## Get the Docker repository + +In this step, you start a Git `bash` terminal and get the Docker source code from +Github. + +1. Locate the **Git Bash** program and start it. + + Recall that **Git Bash** came with the Git for Windows installation. **Git + Bash** just as it sounds allows you to run a Bash terminal on Windows. + + ![Git Bash](/project/images/git_bash.png) + +2. Change to the root directory. + + $ cd /c/ + +3. Make a `gopath` directory. + + $ mkdir gopath + +4. Go get the `docker/docker` repository. + + $ go.exe get github.com/docker/docker package github.com/docker/docker + imports github.com/docker/docker + imports github.com/docker/docker: no buildable Go source files in C:\gopath\src\github.com\docker\docker + + In the next steps, you create environment variables for you Go paths. + +5. Open the **Control Panel** on your system. + +6. Choose **System and Security > System**. + +7. Click the **Advanced system settings** link in the sidebar. + + The system opens the **System Properties** dialog. + +8. Select the **Advanced** tab. + +9. Click **Environment Variables**. + + The system opens the **Environment Variables dialog** dialog. + +10. Locate the **System variables** area and scroll to the **Path** + variable. + +11. Click **New**. + + Now you are going to create some new variables. These paths you'll create in the next procedure; but you can set them now. + +12. Enter `GOPATH` for the **Variable Name**. + +13. For the **Variable Value** enter the following: + + C:\gopath;C:\gopath\src\github.com\docker\docker\vendor + + +14. Press **OK** to close this dialog. + + The system adds `GOPATH` to the list of **System Variables**. + +15. Press **OK** twice to close out of the remaining dialogs. 
+ + +## Where to go next + +In the next section, you'll [learn how to set up and configure Git for +contributing to Docker](/project/set-up-git/). \ No newline at end of file diff --git a/docs/sources/project/software-required.md b/docs/sources/project/software-required.md index 476cbbc2cac81..15b9a693526b9 100644 --- a/docs/sources/project/software-required.md +++ b/docs/sources/project/software-required.md @@ -2,9 +2,10 @@ page_title: Get the required software page_description: Describes the software required to contribute to Docker page_keywords: GitHub account, repository, Docker, Git, Go, make, -# Get the required software +# Get the required software for Linux or OS X -Before you begin contributing you must have: +This page explains how to get the software you need to use a Linux or OS X +machine for Docker development. Before you begin contributing you must have: * a GitHub account * `git` @@ -82,7 +83,7 @@ your user to the `docker` group as follows: $ sudo usermod -aG docker ubuntu -You must log out and back in for this modification to take effect. +You must log out and log back in for this modification to take effect. ## Where to go next diff --git a/docs/sources/project/test-and-docs.md b/docs/sources/project/test-and-docs.md index 93f73282929dd..23b6b0914d6ea 100644 --- a/docs/sources/project/test-and-docs.md +++ b/docs/sources/project/test-and-docs.md @@ -40,7 +40,7 @@ units each have unit tests and then, together, integration tests that test the interface between the components. The `integration` and `integration-cli` directories in the Docker repository contain integration test code. -Testing is its own speciality. If you aren't familiar with testing techniques, +Testing is its own specialty. If you aren't familiar with testing techniques, there is a lot of information available to you on the Web. For now, you should understand that, the Docker maintainers may ask you to write a new test or change an existing one. 
@@ -159,15 +159,16 @@ Most test targets require that you build these precursor targets first: ## Running individual or multiple named tests +We use [gocheck](https://labix.org/gocheck) for our integration-cli tests. You can use the `TESTFLAGS` environment variable to run a single test. The flag's value is passed as arguments to the `go test` command. For example, from your local host you can run the `TestBuild` test with this command: - $ TESTFLAGS='-test.run ^TestBuild$' make test + $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test To run the same test inside your Docker development container, you do this: - root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-run ^TestBuild$' hack/make.sh + root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh ## If tests under Boot2Docker fail due to disk space errors @@ -229,6 +230,46 @@ with new memory settings. 6. Restart your container and try your test again. +## Testing just the Windows client + +This explains how to test the Windows client on a Windows server set up as a +development environment. You'll use the **Git Bash** came with the Git for +Windows installation. **Git Bash** just as it sounds allows you to run a Bash +terminal on Windows. + +1. If you don't have one, start a Git Bash terminal. + + ![Git Bash](/project/images/git_bash.png) + +2. Change to the `docker` source directory. + + $ cd /c/gopath/src/github.com/docker/docker + +3. Set `DOCKER_CLIENTONLY` as follows: + + $ export DOCKER_CLIENTONLY=1 + + This ensures you are building only the client binary instead of both the + binary and the daemon. + +4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your +machine's actual IP address, for example: + + $ export DOCKER_TEST_HOST=tcp://263.124.23.200:2376 + +5. Make the binary and the test: + + $ hack/make.sh binary test-integration-cli + + Many tests are skipped on Windows for various reasons. 
You see which tests + were skipped by re-running the make and passing in the + `TESTFLAGS='-test.v'` value. + + +You can now choose to make changes to the Docker source or the tests. If you +make any changes just run these commands again. + + ## Build and test the documentation The Docker documentation source files are under `docs/sources`. The content is diff --git a/docs/sources/project/work-issue.md b/docs/sources/project/work-issue.md index 5e70bc32cc47e..3291102897b36 100644 --- a/docs/sources/project/work-issue.md +++ b/docs/sources/project/work-issue.md @@ -109,7 +109,7 @@ Follow this workflow as you work: 9. Push your change to your repository. - $ git push origin + $ git push origin 11038-fix-rhel-link Username for 'https://github.com': moxiegirl Password for 'https://moxiegirl@github.com': Counting objects: 60, done. @@ -145,55 +145,46 @@ After you push a new branch, you should verify it on GitHub: You should pull and rebase frequently as you work. -1. Return to the terminal on your local machine. +1. Return to the terminal on your local machine and checkout your + feature branch in your local `docker-fork` repository. -2. Make sure you are in your branch. +2. Fetch any last minute changes from `docker/docker`. - $ git branch 11038-fix-rhel-link + $ git fetch upstream master + From github.com:docker/docker + * branch master -> FETCH_HEAD -3. Fetch all the changes from the `upstream master` branch. +3. Start an interactive rebase. - $ git fetch upstream master + $ git rebase -i upstream/master - This command says get all the changes from the `master` branch belonging to - the `upstream` remote. +4. Rebase opens an editor with a list of commits. -4. Rebase your master with the local copy of Docker's `master` branch. 
+ pick 1a79f55 Tweak some of the other text for grammar + pick 53e4983 Fix a link + pick 3ce07bb Add a new line about RHEL - $ git rebase -i upstream/master - - This command starts an interactive rebase to rewrite all the commits from - Docker's `upstream/master` onto your local branch, and then re-apply each of - your commits on top of the upstream changes. If you aren't familiar or - comfortable with rebase, you can learn more about rebasing on the web. - -5. Rebase opens an editor with a list of commits. +5. Replace the `pick` keyword with `squash` on all but the first commit. - pick 1a79f55 Tweak some of the other text for grammar - pick 53e4983 Fix a link - pick 3ce07bb Add a new line about RHEL - - If you run into trouble, `git --rebase abort` removes any changes and gets - you back to where you started. + pick 1a79f55 Tweak some of the other text for grammar + squash 53e4983 Fix a link + squash 3ce07bb Add a new line about RHEL -6. Squash the `pick` keyword with `squash` on all but the first commit. + After you save the changes and quit from the editor, git starts + the rebase, reporting the progress along the way. Sometimes + your changes can conflict with the work of others. If git + encounters a conflict, it stops the rebase, and prints guidance + for how to correct the conflict. - pick 1a79f55 Tweak some of the other text for grammar - squash 53e4983 Fix a link - squash 3ce07bb Add a new line about RHEL +6. Edit and save your commit message. - After closing the file, `git` opens your editor again to edit the commit - message. + `git commit -s` -7. Edit the commit message to reflect the entire change. + Make sure your message includes /: {}" }` @@ -225,8 +225,8 @@ Json Parameters: container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - - **CapAdd** - A list of kernel capabilties to add to the container. 
- - **Capdrop** - A list of kernel capabilties to drop from the container. + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -553,8 +553,8 @@ Json Parameters: - **DnsSearch** - A list of DNS search domains - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` -- **CapAdd** - A list of kernel capabilties to add to the container. -- **Capdrop** - A list of kernel capabilties to drop from the container. +- **CapAdd** - A list of kernel capabilities to add to the container. +- **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -766,7 +766,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 @@ -1261,7 +1261,7 @@ Query Parameters: Request Headers: - **Content-type** – should be set to `"application/tar"`. -- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1495,7 +1495,7 @@ Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image -(and its parents) are returned. If `name` is an image ID, similarly only tha +(and its parents) are returned. 
If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. diff --git a/docs/sources/reference/api/docker_remote_api_v1.16.md b/docs/sources/reference/api/docker_remote_api_v1.16.md index a0c875889e352..9c6159b9d5023 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.16.md +++ b/docs/sources/reference/api/docker_remote_api_v1.16.md @@ -174,12 +174,12 @@ Json Parameters: container. - **Domainname** - A string value containing the desired domain name to use for the container. -- **User** - A string value containg the user to use inside the container. +- **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap**- Total memory usage (memory + swap); set `-1` to disable swap. - **CpuShares** - An integer value containing the CPU Shares for container - (ie. the relative weight vs othercontainers). - **CpuSet** - String value containg the cgroups Cpuset to use. + (ie. the relative weight vs other containers). + **CpuSet** - String value containing the cgroups Cpuset to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. @@ -195,7 +195,7 @@ Json Parameters: container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. -- **NetworkDisabled** - Boolean value, when true disables neworking for the +- **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` @@ -225,8 +225,8 @@ Json Parameters: container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. 
Specified in the form `[:]` - - **CapAdd** - A list of kernel capabilties to add to the container. - - **Capdrop** - A list of kernel capabilties to drop from the container. + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -553,8 +553,8 @@ Json Parameters: - **DnsSearch** - A list of DNS search domains - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` -- **CapAdd** - A list of kernel capabilties to add to the container. -- **Capdrop** - A list of kernel capabilties to drop from the container. +- **CapAdd** - A list of kernel capabilities to add to the container. +- **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -766,7 +766,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 @@ -1262,7 +1262,7 @@ Query Parameters: Request Headers: - **Content-type** – should be set to `"application/tar"`. -- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1509,7 +1509,7 @@ Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image -(and its parents) are returned. 
If `name` is an image ID, similarly only tha +(and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. diff --git a/docs/sources/reference/api/docker_remote_api_v1.17.md b/docs/sources/reference/api/docker_remote_api_v1.17.md index d0abaffd0c043..80f4fccf00301 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.17.md +++ b/docs/sources/reference/api/docker_remote_api_v1.17.md @@ -175,13 +175,13 @@ Json Parameters: container. - **Domainname** - A string value containing the desired domain name to use for the container. -- **User** - A string value containg the user to use inside the container. +- **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, always use this with `memory`, and make the value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container - (ie. the relative weight vs othercontainers). - **CpuSet** - String value containg the cgroups Cpuset to use. + (ie. the relative weight vs other containers). + **CpuSet** - String value containing the cgroups Cpuset to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. @@ -197,7 +197,7 @@ Json Parameters: container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. 
-- **NetworkDisabled** - Boolean value, when true disables neworking for the +- **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` @@ -227,8 +227,8 @@ Json Parameters: container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` - - **CapAdd** - A list of kernel capabilties to add to the container. - - **Capdrop** - A list of kernel capabilties to drop from the container. + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -686,8 +686,8 @@ Json Parameters: - **DnsSearch** - A list of DNS search domains - **VolumesFrom** - A list of volumes to inherit from another container. Specified in the form `[:]` -- **CapAdd** - A list of kernel capabilties to add to the container. -- **Capdrop** - A list of kernel capabilties to drop from the container. +- **CapAdd** - A list of kernel capabilities to add to the container. +- **Capdrop** - A list of kernel capabilities to drop from the container. - **RestartPolicy** – The behavior to apply when the container exits. The value is an object with a `Name` property of either `"always"` to always restart or `"on-failure"` to restart only when the container @@ -927,7 +927,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. 
Goto 1 @@ -1140,7 +1140,7 @@ Query Parameters: Request Headers: - **Content-type** – should be set to `"application/tar"`. -- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1675,7 +1675,7 @@ Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image -(and its parents) are returned. If `name` is an image ID, similarly only tha +(and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. diff --git a/docs/sources/reference/api/docker_remote_api_v1.18.md b/docs/sources/reference/api/docker_remote_api_v1.18.md index 75eed99dad4eb..a91ca8417a121 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.18.md +++ b/docs/sources/reference/api/docker_remote_api_v1.18.md @@ -91,6 +91,7 @@ Query Parameters: - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) + - label=`key` or `key=value` of a container label Status Codes: @@ -183,14 +184,14 @@ Json Parameters: container. - **Domainname** - A string value containing the desired domain name to use for the container. -- **User** - A string value containg the user to use inside the container. +- **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, always use this with `memory`, and make the value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container - (ie. the relative weight vs othercontainers). + (ie. 
the relative weight vs other containers). - **Cpuset** - The same as CpusetCpus, but deprecated, please don't use. -- **CpusetCpus** - String value containg the cgroups CpusetCpus to use. +- **CpusetCpus** - String value containing the cgroups CpusetCpus to use. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. @@ -207,60 +208,61 @@ Json Parameters: container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. -- **NetworkDisabled** - Boolean value, when true disables neworking for the +- **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - - **Binds** – A list of volume bindings for this container. Each volume - binding is a string of the form `container_path` (to create a new - volume for the container), `host_path:container_path` (to bind-mount - a host path into the container), or `host_path:container_path:ro` - (to make the bind-mount read-only inside the container). - - **Links** - A list of links for the container. Each link entry should be of - of the form "container_name:alias". - - **LxcConf** - LXC specific configurations. These configurations will only - work when using the `lxc` execution driver. - - **PortBindings** - A map of exposed container ports and the host port they - should map to. It should be specified in the form - `{ /: [{ "HostPort": "" }] }` - Take note that `port` is specified as a string and not an integer value. - - **PublishAllPorts** - Allocates a random host port for all of a container's - exposed ports. Specified as a boolean value. - - **Privileged** - Gives the container full access to the host. Specified as - a boolean value. - - **ReadonlyRootfs** - Mount the container's root filesystem as read only. 
- Specified as a boolean value. - - **Dns** - A list of dns servers for the container to use. - - **DnsSearch** - A list of DNS search domains - - **ExtraHosts** - A list of hostnames/IP mappings to be added to the - container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - - **VolumesFrom** - A list of volumes to inherit from another container. - Specified in the form `[:]` - - **CapAdd** - A list of kernel capabilties to add to the container. - - **Capdrop** - A list of kernel capabilties to drop from the container. - - **RestartPolicy** – The behavior to apply when the container exits. The - value is an object with a `Name` property of either `"always"` to - always restart or `"on-failure"` to restart only when the container - exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` - controls the number of times to retry before giving up. - The default is not to restart. (optional) - An ever increasing delay (double the previous delay, starting at 100mS) - is added before each restart to prevent flooding the server. - - **NetworkMode** - Sets the networking mode for the container. Supported - values are: `bridge`, `host`, and `container:` - - **Devices** - A list of devices to add to the container specified in the - form - `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - - **Ulimits** - A list of ulimits to be set in the container, specified as - `{ "Name": , "Soft": , "Hard": }`, for example: - `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}` - - **SecurityOpt**: A list of string values to customize labels for MLS - systems, such as SELinux. - - **LogConfig** - Logging configuration to container, format: - `{ "Type": "", "Config": {"key1": "val1"}}`. - Available types: `json-file`, `syslog`, `none`. - - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. 
If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + - **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. 
The
+ value is an object with a `Name` property of either `"always"` to
+ always restart or `"on-failure"` to restart only when the container
+ exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+ controls the number of times to retry before giving up.
+ The default is not to restart. (optional)
+ An ever increasing delay (double the previous delay, starting at 100mS)
+ is added before each restart to prevent flooding the server.
+ - **NetworkMode** - Sets the networking mode for the container. Supported
+ values are: `bridge`, `host`, and `container:`
+ - **Devices** - A list of devices to add to the container specified in the
+ form
+ `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+ - **Ulimits** - A list of ulimits to be set in the container, specified as
+ `{ "Name": , "Soft": , "Hard": }`, for example:
+ `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+ - **SecurityOpt**: A list of string values to customize labels for MLS
+ systems, such as SELinux.
+ - **LogConfig** - Log configuration for the container, specified as
+ `{ "Type": "", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `none`.
+ `json-file` is the default logging driver.
+ - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
Query Parameters:
@@ -358,7 +360,10 @@ Return low-level information on the container `id`
 "MaximumRetryCount": 2,
 "Name": "on-failure"
 },
- "LogConfig": { "Type": "json-file", Config: {} },
+ "LogConfig": {
+ "Config": null,
+ "Type": "json-file"
+ },
 "SecurityOpt": null,
 "VolumesFrom": null,
 "Ulimits": [{}]
@@ -675,12 +680,90 @@ Start the container `id`
 POST /containers/(id)/start HTTP/1.1
 Content-Type: application/json
+ {
+ "Binds": ["/tmp:/tmp"],
+ "Links": ["redis3:redis"],
+ "LxcConf": {"lxc.utsname":"docker"},
+ "Memory": 0,
+ "MemorySwap": 0,
+ "CpuShares": 512,
+ "CpusetCpus": "0,1",
+ "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] },
+ "PublishAllPorts": false,
+ "Privileged": false,
+ "ReadonlyRootfs": false,
+ "Dns": ["8.8.8.8"],
+ "DnsSearch": [""],
+ "ExtraHosts": null,
+ "VolumesFrom": ["parent", "other:ro"],
+ "CapAdd": ["NET_ADMIN"],
+ "CapDrop": ["MKNOD"],
+ "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 },
+ "NetworkMode": "bridge",
+ "Devices": [],
+ "Ulimits": [{}],
+ "LogConfig": { "Type": "json-file", "Config": {} },
+ "SecurityOpt": [""],
+ "CgroupParent": ""
+ }
+
 **Example response**:
 HTTP/1.1 204 No Content
 Json Parameters:
+- **Binds** – A list of volume bindings for this container. Each volume
+ binding is a string of the form `container_path` (to create a new
+ volume for the container), `host_path:container_path` (to bind-mount
+ a host path into the container), or `host_path:container_path:ro`
+ (to make the bind-mount read-only inside the container).
+- **Links** - A list of links for the container. Each link entry should be
+ of the form `container_name:alias`.
+- **LxcConf** - LXC specific configurations. These configurations will only
+ work when using the `lxc` execution driver.
+- **PortBindings** - A map of exposed container ports and the host port they
+ should map to. It should be specified in the form
+ `{ /: [{ "HostPort": "" }] }`
+ Take note that `port` is specified as a string and not an integer value.
+- **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. +- **Privileged** - Gives the container full access to the host. Specified as + a boolean value. +- **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. +- **Dns** - A list of dns servers for the container to use. +- **DnsSearch** - A list of DNS search domains +- **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. +- **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` +- **CapAdd** - A list of kernel capabilities to add to the container. +- **Capdrop** - A list of kernel capabilities to drop from the container. +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. +- **NetworkMode** - Sets the networking mode for the container. Supported + values are: `bridge`, `host`, and `container:` +- **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` +- **Ulimits** - A list of ulimits to be set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}` +- **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. 
+- **LogConfig** - Log configuration for the container, specified as + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `none`. + `json-file` logging driver. +- **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + Status Codes: - **204** – no error @@ -906,7 +989,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 @@ -1109,6 +1192,7 @@ Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. Available filters: - dangling=true + - label=`key` or `key=value` of an image label ### Build image from a Dockerfile @@ -1163,12 +1247,12 @@ Query Parameters: - **memory** - set memory limit for build - **memswap** - Total memory (memory + swap), `-1` to disable swap - **cpushares** - CPU shares (relative weight) -- **cpusetcpus** - CPUs in which to allow exection, e.g., `0-3`, `0,1` +- **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1` Request Headers: - **Content-type** – should be set to `"application/tar"`. -- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1709,7 +1793,7 @@ Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image -(and its parents) are returned. If `name` is an image ID, similarly only tha +(and its parents) are returned. 
If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. diff --git a/docs/sources/reference/api/docker_remote_api_v1.19.md b/docs/sources/reference/api/docker_remote_api_v1.19.md index b643d449042f4..cede2e1073dd7 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.19.md +++ b/docs/sources/reference/api/docker_remote_api_v1.19.md @@ -91,6 +91,7 @@ Query Parameters: - **filters** - a json encoded value of the filters (a map[string][]string) to process on the containers list. Available filters: - exited=<int> -- containers with exit code of <int> - status=(restarting|running|paused|exited) + - label=`key` or `key=value` of a container label Status Codes: @@ -147,6 +148,7 @@ Create a container "MemorySwap": 0, "CpuShares": 512, "CpusetCpus": "0,1", + "CpusetMems": "0,1", "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, "PublishAllPorts": false, "Privileged": false, @@ -161,7 +163,7 @@ Create a container "NetworkMode": "bridge", "Devices": [], "Ulimits": [{}], - "LogConfig": { "Type": "json-file", Config: {} }, + "LogConfig": { "Type": "json-file", "Config": {} }, "SecurityOpt": [""], "CgroupParent": "" } @@ -183,14 +185,15 @@ Json Parameters: container. - **Domainname** - A string value containing the desired domain name to use for the container. -- **User** - A string value containg the user to use inside the container. +- **User** - A string value containing the user to use inside the container. - **Memory** - Memory limit in bytes. - **MemorySwap**- Total memory limit (memory + swap); set `-1` to disable swap, always use this with `memory`, and make the value larger than `memory`. - **CpuShares** - An integer value containing the CPU Shares for container - (ie. the relative weight vs othercontainers). + (ie. the relative weight vs other containers). 
- **Cpuset** - The same as CpusetCpus, but deprecated, please don't use. -- **CpusetCpus** - String value containg the cgroups CpusetCpus to use. +- **CpusetCpus** - String value containing the cgroups CpusetCpus to use. +- **CpusetMems** - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - **AttachStdin** - Boolean value, attaches to stdin. - **AttachStdout** - Boolean value, attaches to stdout. - **AttachStderr** - Boolean value, attaches to stderr. @@ -207,60 +210,61 @@ Json Parameters: container to empty objects. - **WorkingDir** - A string value containing the working dir for commands to run in. -- **NetworkDisabled** - Boolean value, when true disables neworking for the +- **NetworkDisabled** - Boolean value, when true disables networking for the container - **ExposedPorts** - An object mapping ports to an empty object in the form of: `"ExposedPorts": { "/: {}" }` - **HostConfig** - - **Binds** – A list of volume bindings for this container. Each volume - binding is a string of the form `container_path` (to create a new - volume for the container), `host_path:container_path` (to bind-mount - a host path into the container), or `host_path:container_path:ro` - (to make the bind-mount read-only inside the container). - - **Links** - A list of links for the container. Each link entry should be of - of the form "container_name:alias". - - **LxcConf** - LXC specific configurations. These configurations will only - work when using the `lxc` execution driver. - - **PortBindings** - A map of exposed container ports and the host port they - should map to. It should be specified in the form - `{ /: [{ "HostPort": "" }] }` - Take note that `port` is specified as a string and not an integer value. - - **PublishAllPorts** - Allocates a random host port for all of a container's - exposed ports. Specified as a boolean value. - - **Privileged** - Gives the container full access to the host. Specified as - a boolean value. 
- - **ReadonlyRootfs** - Mount the container's root filesystem as read only. - Specified as a boolean value. - - **Dns** - A list of dns servers for the container to use. - - **DnsSearch** - A list of DNS search domains - - **ExtraHosts** - A list of hostnames/IP mappings to be added to the - container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. - - **VolumesFrom** - A list of volumes to inherit from another container. - Specified in the form `[:]` - - **CapAdd** - A list of kernel capabilties to add to the container. - - **Capdrop** - A list of kernel capabilties to drop from the container. - - **RestartPolicy** – The behavior to apply when the container exits. The - value is an object with a `Name` property of either `"always"` to - always restart or `"on-failure"` to restart only when the container - exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` - controls the number of times to retry before giving up. - The default is not to restart. (optional) - An ever increasing delay (double the previous delay, starting at 100mS) - is added before each restart to prevent flooding the server. - - **NetworkMode** - Sets the networking mode for the container. Supported - values are: `bridge`, `host`, and `container:` - - **Devices** - A list of devices to add to the container specified in the - form - `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` - - **Ulimits** - A list of ulimits to be set in the container, specified as - `{ "Name": , "Soft": , "Hard": }`, for example: - `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}` - - **SecurityOpt**: A list of string values to customize labels for MLS - systems, such as SELinux. - - **LogConfig** - Logging configuration to container, format: - `{ "Type": "", "Config": {"key1": "val1"}}`. - Available types: `json-file`, `syslog`, `none`. 
- - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + - **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). + - **Links** - A list of links for the container. Each link entry should be of + of the form `container_name:alias`. + - **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. + - **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. + - **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. + - **Privileged** - Gives the container full access to the host. Specified as + a boolean value. + - **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. + - **Dns** - A list of dns servers for the container to use. + - **DnsSearch** - A list of DNS search domains + - **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + - **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` + - **CapAdd** - A list of kernel capabilities to add to the container. + - **Capdrop** - A list of kernel capabilities to drop from the container. + - **RestartPolicy** – The behavior to apply when the container exits. 
The
+ value is an object with a `Name` property of either `"always"` to
+ always restart or `"on-failure"` to restart only when the container
+ exit code is non-zero. If `on-failure` is used, `MaximumRetryCount`
+ controls the number of times to retry before giving up.
+ The default is not to restart. (optional)
+ An ever increasing delay (double the previous delay, starting at 100mS)
+ is added before each restart to prevent flooding the server.
+ - **NetworkMode** - Sets the networking mode for the container. Supported
+ values are: `bridge`, `host`, and `container:`
+ - **Devices** - A list of devices to add to the container specified in the
+ form
+ `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}`
+ - **Ulimits** - A list of ulimits to be set in the container, specified as
+ `{ "Name": , "Soft": , "Hard": }`, for example:
+ `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}`
+ - **SecurityOpt**: A list of string values to customize labels for MLS
+ systems, such as SELinux.
+ - **LogConfig** - Log configuration for the container, specified as
+ `{ "Type": "", "Config": {"key1": "val1"}}`.
+ Available types: `json-file`, `syslog`, `journald`, `none`.
+ `json-file` is the default logging driver.
+ - **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
Query Parameters: @@ -339,6 +343,7 @@ Return low-level information on the container `id` "CapDrop": null, "ContainerIDFile": "", "CpusetCpus": "", + "CpusetMems": "", "CpuShares": 0, "Devices": [], "Dns": null, @@ -358,7 +363,10 @@ Return low-level information on the container `id` "MaximumRetryCount": 2, "Name": "on-failure" }, - "LogConfig": { "Type": "json-file", Config: {} }, + "LogConfig": { + "Config": null, + "Type": "json-file" + }, "SecurityOpt": null, "VolumesFrom": null, "Ulimits": [{}] @@ -675,12 +683,90 @@ Start the container `id` POST /containers/(id)/start HTTP/1.1 Content-Type: application/json + { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 0, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } + **Example response**: HTTP/1.1 204 No Content Json Parameters: +- **Binds** – A list of volume bindings for this container. Each volume + binding is a string of the form `container_path` (to create a new + volume for the container), `host_path:container_path` (to bind-mount + a host path into the container), or `host_path:container_path:ro` + (to make the bind-mount read-only inside the container). +- **Links** - A list of links for the container. Each link entry should be of + of the form `container_name:alias`. +- **LxcConf** - LXC specific configurations. These configurations will only + work when using the `lxc` execution driver. 
+- **PortBindings** - A map of exposed container ports and the host port they + should map to. It should be specified in the form + `{ /: [{ "HostPort": "" }] }` + Take note that `port` is specified as a string and not an integer value. +- **PublishAllPorts** - Allocates a random host port for all of a container's + exposed ports. Specified as a boolean value. +- **Privileged** - Gives the container full access to the host. Specified as + a boolean value. +- **ReadonlyRootfs** - Mount the container's root filesystem as read only. + Specified as a boolean value. +- **Dns** - A list of DNS servers for the container to use. +- **DnsSearch** - A list of DNS search domains. +- **ExtraHosts** - A list of hostnames/IP mappings to be added to the + container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. +- **VolumesFrom** - A list of volumes to inherit from another container. + Specified in the form `[:]` +- **CapAdd** - A list of kernel capabilities to add to the container. +- **CapDrop** - A list of kernel capabilities to drop from the container. +- **RestartPolicy** – The behavior to apply when the container exits. The + value is an object with a `Name` property of either `"always"` to + always restart or `"on-failure"` to restart only when the container + exit code is non-zero. If `on-failure` is used, `MaximumRetryCount` + controls the number of times to retry before giving up. + The default is not to restart. (optional) + An ever increasing delay (double the previous delay, starting at 100mS) + is added before each restart to prevent flooding the server. +- **NetworkMode** - Sets the networking mode for the container.
Supported + values are: `bridge`, `host`, and `container:` +- **Devices** - A list of devices to add to the container specified in the + form + `{ "PathOnHost": "/dev/deviceName", "PathInContainer": "/dev/deviceName", "CgroupPermissions": "mrw"}` +- **Ulimits** - A list of ulimits to be set in the container, specified as + `{ "Name": , "Soft": , "Hard": }`, for example: + `Ulimits: { "Name": "nofile", "Soft": 1024, "Hard", 2048 }}` +- **SecurityOpt**: A list of string values to customize labels for MLS + systems, such as SELinux. +- **LogConfig** - Log configuration for the container, specified as + `{ "Type": "", "Config": {"key1": "val1"}}`. + Available types: `json-file`, `syslog`, `journald`, `none`. + `json-file` logging driver. +- **CgroupParent** - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. + Status Codes: - **204** – no error @@ -906,7 +992,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1 @@ -1109,6 +1195,7 @@ Query Parameters: - **all** – 1/True/true or 0/False/false, default false - **filters** – a json encoded value of the filters (a map[string][]string) to process on the images list. 
Available filters: - dangling=true + - label=`key` or `key=value` of an image label ### Build image from a Dockerfile @@ -1163,12 +1250,12 @@ Query Parameters: - **memory** - set memory limit for build - **memswap** - Total memory (memory + swap), `-1` to disable swap - **cpushares** - CPU shares (relative weight) -- **cpusetcpus** - CPUs in which to allow exection, e.g., `0-3`, `0,1` +- **cpusetcpus** - CPUs in which to allow execution, e.g., `0-3`, `0,1` Request Headers: - **Content-type** – should be set to `"application/tar"`. -- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1709,7 +1796,7 @@ Get a tarball containing all images and metadata for the repository specified by `name`. If `name` is a specific name and tag (e.g. ubuntu:latest), then only that image -(and its parents) are returned. If `name` is an image ID, similarly only tha +(and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the 'repositories' file in the tarball, as there were no image names referenced. diff --git a/docs/sources/reference/api/docker_remote_api_v1.6.md b/docs/sources/reference/api/docker_remote_api_v1.6.md index d0f9661e50913..cd8a7308841dc 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.6.md +++ b/docs/sources/reference/api/docker_remote_api_v1.6.md @@ -560,7 +560,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. 
Goto 1) diff --git a/docs/sources/reference/api/docker_remote_api_v1.7.md b/docs/sources/reference/api/docker_remote_api_v1.7.md index 6cdd60374fb14..dade45fbc9d23 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.7.md +++ b/docs/sources/reference/api/docker_remote_api_v1.7.md @@ -505,7 +505,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1) diff --git a/docs/sources/reference/api/docker_remote_api_v1.8.md b/docs/sources/reference/api/docker_remote_api_v1.8.md index 409e63a163499..56260db867c02 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.8.md +++ b/docs/sources/reference/api/docker_remote_api_v1.8.md @@ -553,7 +553,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1) diff --git a/docs/sources/reference/api/docker_remote_api_v1.9.md b/docs/sources/reference/api/docker_remote_api_v1.9.md index 7ea3fc9ab1a94..26c6b453e0345 100644 --- a/docs/sources/reference/api/docker_remote_api_v1.9.md +++ b/docs/sources/reference/api/docker_remote_api_v1.9.md @@ -557,7 +557,7 @@ Status Codes: 1. Read 8 bytes 2. chose stdout or stderr depending on the first byte - 3. Extract the frame size from the last 4 byets + 3. Extract the frame size from the last 4 bytes 4. Read the extracted size and output it on the correct output 5. Goto 1) @@ -675,7 +675,7 @@ Status Codes: ## 2.2 Images -### List Images +### List images `GET /images/json` @@ -1052,7 +1052,7 @@ Query Parameters: Request Headers: - **Content-type** – should be set to `"application/tar"`. 
-- **X-Registry-Config** – base64-encoded ConfigFile objec +- **X-Registry-Config** – base64-encoded ConfigFile object Status Codes: @@ -1119,7 +1119,7 @@ Status Codes: - **200** – no error - **500** – server error -### Show the docker version information +### Show the Docker version information `GET /version` @@ -1343,7 +1343,7 @@ Here are the steps of `docker run` : In this version of the API, /attach, uses hijacking to transport stdin, stdout and stderr on the same socket. This might change in the future. -## 3.3 CORS Requests +## 3.3 CORS requests To enable cross origin requests to the remote api add the flag "--api-enable-cors" when running docker in daemon mode. diff --git a/docs/sources/reference/api/hub_registry_spec.md b/docs/sources/reference/api/hub_registry_spec.md index f01007587a37d..b1481e3a0344e 100644 --- a/docs/sources/reference/api/hub_registry_spec.md +++ b/docs/sources/reference/api/hub_registry_spec.md @@ -1,8 +1,8 @@ -page_title: Registry Documentation +page_title: Registry documentation page_description: Documentation for docker Registry and Registry API page_keywords: docker, registry, api, hub -# The Docker Hub and the Registry spec +# The Docker Hub and the Registry 1.0 spec ## The three roles @@ -28,9 +28,9 @@ The Docker Hub is authoritative for that information. There is only one instance of the Docker Hub, run and managed by Docker Inc. -### Registry +### Docker Registry 1.0 -The registry has the following characteristics: +The 1.0 registry has the following characteristics: - It stores the images and the graph for a set of repositories - It does not have user accounts data @@ -679,7 +679,7 @@ On every request, a special header can be returned: On the next request, the client will always pick a server from this list. -## Authentication & Authorization +## Authentication and authorization ### On the Docker Hub @@ -747,7 +747,7 @@ Next request: GET /(...) 
Cookie: session="wD/J7LqL5ctqw8haL10vgfhrb2Q=?foo=UydiYXInCnAxCi4=×tamp=RjEzNjYzMTQ5NDcuNDc0NjQzCi4=" -## Document Version +## Document version - 1.0 : May 6th 2013 : initial release - 1.1 : June 1st 2013 : Added Delete Repository and way to handle new diff --git a/docs/sources/reference/api/registry_api.md b/docs/sources/reference/api/registry_api.md index 54a158934a77c..13a51356f0462 100644 --- a/docs/sources/reference/api/registry_api.md +++ b/docs/sources/reference/api/registry_api.md @@ -2,11 +2,11 @@ page_title: Registry API page_description: API Documentation for Docker Registry page_keywords: API, Docker, index, registry, REST, documentation -# Docker Registry API +# Docker Registry API v1 ## Introduction - - This is the REST API for the Docker Registry + - This is the REST API for the Docker Registry 1.0 - It stores the images and the graph for a set of repositories - It does not have user accounts data - It has no notion of user accounts or authorization diff --git a/docs/sources/reference/api/registry_api_client_libraries.md b/docs/sources/reference/api/registry_api_client_libraries.md index 6977af3cc462f..965ba460330bd 100644 --- a/docs/sources/reference/api/registry_api_client_libraries.md +++ b/docs/sources/reference/api/registry_api_client_libraries.md @@ -1,8 +1,8 @@ -page_title: Registry API Client Libraries +page_title: Registry API client libraries page_description: Various client libraries available to use with the Docker registry API page_keywords: API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala -# Docker Registry API Client Libraries +# Docker Registry 1.0 API client libraries These libraries have not been tested by the Docker maintainers for compatibility. Please file issues with the library owners. 
If you find diff --git a/docs/sources/reference/api/remote_api_client_libraries.md b/docs/sources/reference/api/remote_api_client_libraries.md index d79bbd89ab136..cbe8f3a32840f 100644 --- a/docs/sources/reference/api/remote_api_client_libraries.md +++ b/docs/sources/reference/api/remote_api_client_libraries.md @@ -1,8 +1,8 @@ -page_title: Remote API Client Libraries +page_title: Remote API client libraries page_description: Various client libraries available to use with the Docker remote API page_keywords: API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala -# Docker Remote API Client Libraries +# Docker Remote API client libraries These libraries have not been tested by the Docker maintainers for compatibility. Please file issues with the library owners. If you find @@ -61,110 +61,116 @@ will add the libraries here. Active + Haskell + docker-hs + https://github.com/denibertovic/docker-hs + Active + + Java docker-java https://github.com/docker-java/docker-java Active - + Java docker-client https://github.com/spotify/docker-client Active - + Java jclouds-docker https://github.com/jclouds/jclouds-labs/tree/master/docker Active - + JavaScript (NodeJS) dockerode https://github.com/apocas/dockerode Install via NPM: npm install dockerode Active - + JavaScript (NodeJS) docker.io https://github.com/appersonlabs/docker.io Install via NPM: npm install docker.io Active - + JavaScript docker-js https://github.com/dgoujard/docker-js Outdated - + JavaScript (Angular) WebUI docker-cp https://github.com/13W/docker-cp Active - + JavaScript (Angular) WebUI dockerui https://github.com/crosbymichael/dockerui Active - + Perl Net::Docker https://metacpan.org/pod/Net::Docker Active - + Perl Eixo::Docker https://github.com/alambike/eixo-docker Active - + PHP Alvine http://pear.alvine.io/ (alpha) Active - + PHP Docker-PHP http://stage1.github.io/docker-php/ Active - + Python docker-py 
https://github.com/docker/docker-py Active - + Ruby docker-api https://github.com/swipely/docker-api Active - + Ruby docker-client https://github.com/geku/docker-client Outdated - + Rust docker-rust https://github.com/abh1nav/docker-rust Active - + Scala tugboat https://github.com/softprops/tugboat Active - + Scala reactive-docker https://github.com/almoehi/reactive-docker diff --git a/docs/sources/reference/builder.md b/docs/sources/reference/builder.md index d837541aa2401..7dbe549237f13 100644 --- a/docs/sources/reference/builder.md +++ b/docs/sources/reference/builder.md @@ -1,8 +1,8 @@ -page_title: Dockerfile Reference +page_title: Dockerfile reference page_description: Dockerfiles use a simple DSL which allows you to automate the steps you would normally manually take to create an image. page_keywords: builder, docker, Dockerfile, automation, image creation -# Dockerfile Reference +# Dockerfile reference **Docker can build images automatically** by reading the instructions from a `Dockerfile`. A `Dockerfile` is a text document that contains all @@ -41,10 +41,11 @@ whole context must be transferred to the daemon. The Docker CLI reports > repository, the entire contents of your hard drive will get sent to the daemon (and > thus to the machine running the daemon). You probably don't want that. -In most cases, it's best to put each Dockerfile in an empty directory, and then add only -the files needed for building that Dockerfile to that directory. To further speed up the -build, you can exclude files and directories by adding a `.dockerignore` file to the same -directory. +In most cases, it's best to put each Dockerfile in an empty directory. Then, +only add the files needed for building the Dockerfile to the directory. To +increase the build's performance, you can exclude files and directories by +adding a `.dockerignore` file to the directory. For information about how to +[create a `.dockerignore` file](#the-dockerignore-file) on this page. 
You can specify a repository and tag at which to save the new image if the build succeeds: @@ -105,7 +106,7 @@ be treated as an argument. This allows statements like: Here is the set of instructions you can use in a `Dockerfile` for building images. -### Environment Replacement +### Environment replacement > **Note**: prior to 1.3, `Dockerfile` environment variables were handled > similarly, in that they would be replaced as described below. However, there @@ -128,7 +129,7 @@ modifiers as specified below: * `${variable:-word}` indicates that if `variable` is set then the result will be that value. If `variable` is not set then `word` will be the result. -* `${variable:+word}` indiates that if `variable` is set then `word` will be +* `${variable:+word}` indicates that if `variable` is set then `word` will be the result, otherwise the result is the empty string. In all cases, `word` can be any string, including additional environment @@ -158,7 +159,7 @@ The instructions that handle environment variables in the `Dockerfile` are: `ONBUILD` instructions are **NOT** supported for environment replacement, even the instructions above. -Environment variable subtitution will use the same value for each variable +Environment variable substitution will use the same value for each variable throughout the entire command. In other words, in this example: ENV abc=hello @@ -169,43 +170,67 @@ will result in `def` having a value of `hello`, not `bye`. However, `ghi` will have a value of `bye` because it is not part of the same command that set `abc` to `bye`. -## The `.dockerignore` file +### .dockerignore file -If a file named `.dockerignore` exists in the source repository, then it -is interpreted as a newline-separated list of exclusion patterns. -Exclusion patterns match files or directories relative to the source repository -that will be excluded from the context. 
Globbing is done using Go's +If a file named `.dockerignore` exists in the root of `PATH`, then Docker +interprets it as a newline-separated list of exclusion patterns. Docker excludes +files or directories relative to `PATH` that match these exclusion patterns. If +there are any `.dockerignore` files in `PATH` subdirectories, Docker treats +them as normal files. + +Filepaths in `.dockerignore` are absolute with the current directory as the +root. Wildcards are allowed but the search is not recursive. Globbing (file name +expansion) is done using Go's [filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. -> **Note**: -> The `.dockerignore` file can even be used to ignore the `Dockerfile` and -> `.dockerignore` files. This might be useful if you are copying files from -> the root of the build context into your new containter but do not want to -> include the `Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`). +You can specify exceptions to exclusion rules. To do this, simply prefix a +pattern with an `!` (exclamation mark) in the same way you would in a +`.gitignore` file. Currently there is no support for regular expressions. +Formats like `[^temp*]` are ignored. -The following example shows the use of the `.dockerignore` file to exclude the -`.git` directory from the context. Its effect can be seen in the changed size of -the uploaded context. +The following is an example `.dockerignore` file: + +``` + */temp* + */*/temp* + temp? + *.md + !LICENCSE.md +``` + +This file causes the following build behavior: + +| Rule | Behavior | +|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `*/temp*` | Exclude all files with names starting with`temp` in any subdirectory below the root directory. For example, a file named`/somedir/temporary.txt` is ignored. 
| +| `*/*/temp*` | Exclude files starting with name `temp` from any subdirectory that is two levels below the root directory. For example, the file `/somedir/subdir/temporary.txt` is ignored. | +| `temp?` | Exclude the files that match the pattern in the root directory. For example, the files `tempa`, `tempb` in the root directory are ignored. | +| `*.md ` | Exclude all markdown files. | +| `!LICENSE.md` | Exception to the exclude all Markdown files is this file, `LICENSE.md`, include this file in the build. | + +The placement of `!` exception rules influences the matching algorithm; the +last line of the `.dockerignore` that matches a particular file determines +whether it is included or excluded. In the above example, the `LICENSE.md` file +matches both the `*.md` and `!LICENSE.md` rule. If you reverse the lines in the +example: + +``` + */temp* + */*/temp* + temp? + !LICENCSE.md + *.md +``` + +The build would exclude `LICENSE.md` because the last `*.md` rule adds all +Markdown files back onto the ignore list. The `!LICENSE.md` rule has no effect +because the subsequent `*.md` rule overrides it. + +You can even use the `.dockerignore` file to ignore the `Dockerfile` and +`.dockerignore` files. This is useful if you are copying files from the root of +the build context into your new container but do not want to include the +`Dockerfile` or `.dockerignore` files (e.g. `ADD . /someDir/`). - $ docker build . - Uploading context 18.829 MB - Uploading context - Step 0 : FROM busybox - ---> 769b9341d937 - Step 1 : CMD echo Hello World - ---> Using cache - ---> 99cc1ad10469 - Successfully built 99cc1ad10469 - $ echo ".git" > .dockerignore - $ docker build . - Uploading context 6.76 MB - Uploading context - Step 0 : FROM busybox - ---> 769b9341d937 - Step 1 : CMD echo Hello World - ---> Using cache - ---> 99cc1ad10469 - Successfully built 99cc1ad10469 ## FROM @@ -288,7 +313,7 @@ guide](/articles/dockerfile_best-practices/#build-cache) for more information. 
The cache for `RUN` instructions can be invalidated by `ADD` instructions. See [below](#add) for details. -### Known Issues (RUN) +### Known issues (RUN) - [Issue 783](https://github.com/docker/docker/issues/783) is about file permissions problems that can occur when using the AUFS file system. You @@ -299,7 +324,7 @@ The cache for `RUN` instructions can be invalidated by `ADD` instructions. See the layers with `dirperm1` option. More details on `dirperm1` option can be found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html) - If your system doesnt have support for `dirperm1`, the issue describes a workaround. + If your system doesn't have support for `dirperm1`, the issue describes a workaround. ## CMD @@ -368,14 +393,13 @@ default specified in `CMD`. The `LABEL` instruction adds metadata to an image. A `LABEL` is a key-value pair. To include spaces within a `LABEL` value, use quotes and -blackslashes as you would in command-line parsing. +backslashes as you would in command-line parsing. LABEL "com.example.vendor"="ACME Incorporated" An image can have more than one label. To specify multiple labels, separate each -key-value pair by an EOL. +key-value pair with whitespace. - LABEL com.example.label-without-value LABEL com.example.label-with-value="foo" LABEL version="1.0" LABEL description="This text illustrates \ @@ -385,6 +409,8 @@ Docker recommends combining labels in a single `LABEL` instruction where possible. Each `LABEL` instruction produces a new layer which can result in an inefficient image if you use many labels. This example results in four image layers. + + LABEL multi.label1="value1" multi.label2="value2" other="value3" Labels are additive including `LABEL`s in `FROM` images. As the system encounters and then applies a new label, new `key`s override any previous labels @@ -392,6 +418,16 @@ with identical keys. To view an image's labels, use the `docker inspect` command. 
+ "Labels": { + "com.example.vendor": "ACME Incorporated" + "com.example.label-with-value": "foo", + "version": "1.0", + "description": "This text illustrates that label-values can span multiple lines.", + "multi.label1": "value1", + "multi.label2": "value2", + "other": "value3" + }, + ## EXPOSE EXPOSE [...] @@ -962,7 +998,7 @@ For example you might add something like this: > **Warning**: The `ONBUILD` instruction may not trigger `FROM` or `MAINTAINER` instructions. -## Dockerfile Examples +## Dockerfile examples # Nginx # diff --git a/docs/sources/reference/commandline/cli.md b/docs/sources/reference/commandline/cli.md index 9f8daa03f5978..c69f0a170e2fa 100644 --- a/docs/sources/reference/commandline/cli.md +++ b/docs/sources/reference/commandline/cli.md @@ -24,7 +24,7 @@ the `docker` command, your system administrator can create a Unix group called For more information about installing Docker or `sudo` configuration, refer to the [installation](/installation) instructions for your operating system. -## Environment Variables +## Environment variables For easy reference, the following list of environment variables are supported by the `docker` command line: @@ -48,6 +48,35 @@ These Go environment variables are case-insensitive. See the [Go specification](http://golang.org/pkg/net/http/) for details on these variables. +## Configuration files + +The Docker command line stores its configuration files in a directory called +`.docker` within your `HOME` directory. Docker manages most of the files in +`.docker` and you should not modify them. However, you *can modify* the +`.docker/config.json` file to control certain aspects of how the `docker` +command behaves. + +Currently, you can modify the `docker` command behavior using environment +variables or command-line options. You can also use options within +`config.json` to modify some of the same behavior. When using these +mechanisms, you must keep in mind the order of precedence among them. 
Command +line options override environment variables and environment variables override +properties you specify in a `config.json` file. + +The `config.json` file stores a JSON encoding of a single `HttpHeaders` +property. The property specifies a set of headers to include in all +messages sent from the Docker client to the daemon. Docker does not try to +interpret or understand these headers; it simply puts them into the messages. +Docker does not allow these headers to change any headers it sets for itself. + +Following is a sample `config.json` file: + + { + "HttpHeaders": { + "MyHeader": "MyValue" + } + } + ## Help To list the help on any command just execute the command, followed by the `--help` option. @@ -116,6 +145,8 @@ expect an integer, and they can only be specified once. --bip="" Specify network bridge IP -D, --debug=false Enable debug mode -d, --daemon=false Enable daemon mode + --default-gateway="" Container default gateway IPv4 address + --default-gateway-v6="" Container default gateway IPv6 address --dns=[] DNS server to use --dns-search=[] DNS search domains to use -e, --exec-driver="native" Exec driver to use @@ -134,7 +165,7 @@ expect an integer, and they can only be specified once. --ipv6=false Enable IPv6 networking -l, --log-level="info" Set the logging level --label=[] Set key=value labels to the daemon - --log-driver="json-file" Container's logging driver (json-file/none) + --log-driver="json-file" Default driver for container logs --mtu=0 Set the containers network MTU -p, --pidfile="/var/run/docker.pid" Path to use for daemon PID file --registry-mirror=[] Preferred Docker registry mirror @@ -376,7 +407,42 @@ Currently supported options are: $ docker -d --storage-opt dm.blkdiscard=false -### Docker exec-driver option + * `dm.override_udev_sync_check` + + Overrides the `udev` synchronization checks between `devicemapper` and `udev`. + `udev` is the device manager for the Linux kernel.
+ + To view the `udev` sync support of a Docker daemon that is using the + `devicemapper` driver, run: + + $ docker info + [...] + Udev Sync Supported: true + [...] + + When `udev` sync support is `true`, then `devicemapper` and udev can + coordinate the activation and deactivation of devices for containers. + + When `udev` sync support is `false`, a race condition occurs between + the`devicemapper` and `udev` during create and cleanup. The race condition + results in errors and failures. (For information on these failures, see + [docker#4036](https://github.com/docker/docker/issues/4036)) + + To allow the `docker` daemon to start, regardless of `udev` sync not being + supported, set `dm.override_udev_sync_check` to true: + + $ docker -d --storage-opt dm.override_udev_sync_check=true + + When this value is `true`, the `devicemapper` continues and simply warns + you the errors are happening. + + > **Note**: The ideal is to pursue a `docker` daemon and environment that + > does support synchronizing with `udev`. For further discussion on this + > topic, see [docker#4036](https://github.com/docker/docker/issues/4036). + > Otherwise, set this flag for migrating existing Docker daemons to a + > daemon with a supported environment. + +### Docker execdriver option The Docker daemon uses a specifically built `libcontainer` execution driver as its interface to the Linux kernel `namespaces`, `cgroups`, and `SELinux`. @@ -386,6 +452,21 @@ https://linuxcontainers.org/) via the `lxc` execution driver, however, this is not where the primary development of new functionality is taking place. Add `-e lxc` to the daemon flags to use the `lxc` execution driver. +#### Options for the native execdriver + +You can configure the `native` (libcontainer) execdriver using options specified +with the `--exec-opt` flag. All the flag's options have the `native` prefix. A +single `native.cgroupdriver` option is available. 
+ +The `native.cgroupdriver` option specifies the management of the container's +cgroups. You can specify `cgroupfs` or `systemd`. If you specify `systemd` and +it is not available, the system uses `cgroupfs`. By default, if no option is +specified, the execdriver first tries `systemd` and falls back to `cgroupfs`. +This example sets the execdriver to `cgroupfs`: + + $ sudo docker -d --exec-opt native.cgroupdriver=cgroupfs + +Setting this option applies to all containers the daemon launches. ### Daemon DNS options @@ -481,10 +562,16 @@ interactively. You can attach to the same contained process multiple times simultaneously, screen sharing style, or quickly view the progress of your daemonized process. -You can detach from the container (and leave it running) with `CTRL-p CTRL-q` -(for a quiet exit) or `CTRL-c` which will send a `SIGKILL` to the container. -When you are attached to a container, and exit its main process, the process's -exit code will be returned to the client. +You can detach from the container and leave it running with `CTRL-p +CTRL-q` (for a quiet exit) or with `CTRL-c` if `--sig-proxy` is false. + +If `--sig-proxy` is true (the default),`CTRL-c` sends a `SIGINT` +to the container. + +>**Note**: A process running as PID 1 inside a container is treated +>specially by Linux: it ignores any signal with the default action. +>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is +>coded to do so. It is forbidden to redirect the standard input of a `docker attach` command while attaching to a tty-enabled container (i.e.: launched with `-t`). @@ -556,6 +643,7 @@ is returned by the `docker attach` command to its caller too: --memory-swap="" Total memory (memory + swap), `-1` to disable swap -c, --cpu-shares CPU Shares (relative weight) --cpuset-cpus="" CPUs in which to allow execution, e.g. `0-3`, `0,1` + --cpuset-mems="" MEMs in which to allow execution, e.g. `0-3`, `0,1` Builds Docker images from a Dockerfile and a "context". 
A build's context is the files located in the specified `PATH` or `URL`. The build process can @@ -563,12 +651,13 @@ refer to any of the files in the context. For example, your build can use an [*ADD*](/reference/builder/#add) instruction to reference a file in the context. -The `URL` parameter can specify the location of a Git repository; in this -case, the repository is the context. The Git repository is recursively -cloned with its submodules. The system does a fresh `git clone -recursive` -in a temporary directory on your local host. Then, this clone is sent to -the Docker daemon as the context. Local clones give you the ability to -access private repositories using local user credentials, VPN's, and so forth. +The `URL` parameter can specify the location of a Git repository; +the repository acts as the build context. The system recursively clones the repository +and its submodules using a `git clone --depth 1 --recursive` command. +This command runs in a temporary directory on your local host. +After the command succeeds, the directory is sent to the Docker daemon as the context. +Local clones give you the ability to access private repositories using +local user credentials, VPN's, and so forth. Instead of specifying a context, you can pass a single Dockerfile in the `URL` or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: @@ -579,6 +668,26 @@ If you use STDIN or specify a `URL`, the system places the contents into a file called `Dockerfile`, and any `-f`, `--file` option is ignored. In this scenario, there is no context. +By default the `docker build` command will look for a `Dockerfile` at the +root of the build context. The `-f`, `--file`, option lets you specify +the path to an alternative file to use instead. This is useful +in cases where the same set of files are used for multiple builds. The path +must be to a file within the build context. If a relative path is specified +then it must to be relative to the current directory. 
+ +In most cases, it's best to put each Dockerfile in an empty directory. Then, add +to that directory only the files needed for building the Dockerfile. To increase +the build's performance, you can exclude files and directories by adding a +`.dockerignore` file to that directory as well. For information on creating one, +see the [.dockerignore file](../../reference/builder/#dockerignore-file). + +If the Docker client loses connection to the daemon, the build is canceled. +This happens if you interrupt the Docker client with `ctrl-c` or if the Docker +client is killed for any reason. + +> **Note:** Currently only the "run" phase of the build can be canceled until +> pull cancelation is implemented). + ### Return code On a successful build, a return code of success `0` will be returned. @@ -599,55 +708,11 @@ INFO[0000] The command [/bin/sh -c exit 13] returned a non-zero code: 13 $ echo $? 1 ``` - -### .dockerignore file - -If a file named `.dockerignore` exists in the root of `PATH` then it -is interpreted as a newline-separated list of exclusion patterns. -Exclusion patterns match files or directories relative to `PATH` that -will be excluded from the context. Globbing is done using Go's -[filepath.Match](http://golang.org/pkg/path/filepath#Match) rules. - -Please note that `.dockerignore` files in other subdirectories are -considered as normal files. Filepaths in `.dockerignore` are absolute with -the current directory as the root. Wildcards are allowed but the search -is not recursive. - -#### Example .dockerignore file - */temp* - */*/temp* - temp? - -The first line above `*/temp*`, would ignore all files with names starting with -`temp` from any subdirectory below the root directory. For example, a file named -`/somedir/temporary.txt` would be ignored. The second line `*/*/temp*`, will -ignore files starting with name `temp` from any subdirectory that is two levels -below the root directory. 
For example, the file `/somedir/subdir/temporary.txt` -would get ignored in this case. The last line in the above example `temp?` -will ignore the files that match the pattern from the root directory. -For example, the files `tempa`, `tempb` are ignored from the root directory. -Currently there is no support for regular expressions. Formats -like `[^temp*]` are ignored. - -By default the `docker build` command will look for a `Dockerfile` at the -root of the build context. The `-f`, `--file`, option lets you specify -the path to an alternative file to use instead. This is useful -in cases where the same set of files are used for multiple builds. The path -must be to a file within the build context. If a relative path is specified -then it must to be relative to the current directory. - -If the Docker client loses connection to the daemon, the build is canceled. -This happens if you interrupt the Docker client with `ctrl-c` or if the Docker -client is killed for any reason. - -> **Note:** Currently only the "run" phase of the build can be canceled until -> pull cancelation is implemented). - See also: [*Dockerfile Reference*](/reference/builder). -#### Examples +### Examples $ docker build . Uploading context 10240 bytes @@ -716,7 +781,8 @@ affect the build cache. This example shows the use of the `.dockerignore` file to exclude the `.git` directory from the context. Its effect can be seen in the changed size of the -uploaded context. +uploaded context. The builder reference contains detailed information on +[creating a .dockerignore file](../../builder/#dockerignore-file) $ docker build -t vieux/apache:2.0 . @@ -797,7 +863,8 @@ If this behavior is undesired, set the 'p' option to false. The `--change` option will apply `Dockerfile` instructions to the image that is created. 
-Supported `Dockerfile` instructions: `ADD`|`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`FROM`|`MAINTAINER`|`RUN`|`USER`|`LABEL`|`VOLUME`|`WORKDIR`|`COPY` +Supported `Dockerfile` instructions: +`CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` #### Commit a container @@ -851,6 +918,8 @@ Creates a new container. --cgroup-parent="" Optional parent cgroup for the container --cidfile="" Write the container ID to the file --cpuset-cpus="" CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems="" Memory nodes (MEMs) in which to allow execution (0-3, 0,1) + --cpu-quota=0 Limit the CPU CFS (Completely Fair Scheduler) quota --device=[] Add a host device to the container --dns=[] Set custom DNS servers --dns-search=[] Set custom DNS search domains @@ -1079,7 +1148,9 @@ You'll need two shells for this example. -d, --detach=false Detached mode: run command in the background -i, --interactive=false Keep STDIN open even if not attached + --privileged=false Give extended privileges to the command -t, --tty=false Allocate a pseudo-TTY + -u, --user= Username or UID (format: [:]) The `docker exec` command runs a new command in a running container. @@ -1146,19 +1217,30 @@ This will create a new Bash session in the container `ubuntu_bash`. 
Show the history of an image + -H, --human=true Print sizes and dates in human readable format --no-trunc=false Don't truncate output -q, --quiet=false Only show numeric IDs To see how the `docker:latest` image was built: $ docker history docker - IMAGE CREATED CREATED BY SIZE - 3e23a5875458790b7a806f95f7ec0d0b2a5c1659bfc899c89f939f6d5b8f7094 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B - 8578938dd17054dce7993d21de79e96a037400e8d28e15e7290fea4f65128a36 8 days ago /bin/sh -c dpkg-reconfigure locales && locale-gen C.UTF-8 && /usr/sbin/update-locale LANG=C.UTF-8 1.245 MB - be51b77efb42f67a5e96437b3e102f81e0a1399038f77bf28cea0ed23a65cf60 8 days ago /bin/sh -c apt-get update && apt-get install -y git libxml2-dev python build-essential make gcc python-dev locales python-pip 338.3 MB - 4b137612be55ca69776c7f30c2d2dd0aa2e7d72059820abf3e25b629f887a084 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB - 750d58736b4b6cc0f9a9abe8f258cef269e3e9dceced1146503522be9f985ada 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -t jessie.tar.xz jessie http://http.debian.net/debian 0 B - 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 9 months ago 0 B + IMAGE CREATED CREATED BY SIZE COMMENT + 3e23a5875458 8 days ago /bin/sh -c #(nop) ENV LC_ALL=C.UTF-8 0 B + 8578938dd170 8 days ago /bin/sh -c dpkg-reconfigure locales && loc 1.245 MB + be51b77efb42 8 days ago /bin/sh -c apt-get update && apt-get install 338.3 MB + 4b137612be55 6 weeks ago /bin/sh -c #(nop) ADD jessie.tar.xz in / 121 MB + 750d58736b4b 6 weeks ago /bin/sh -c #(nop) MAINTAINER Tianon Gravi **Note**: A process running as PID 1 inside a container is treated +>specially by Linux: it ignores any signal with the default action. +>So, the process will not terminate on `SIGINT` or `SIGTERM` unless it is +>coded to do so. 
+ ## Container identification ### Name (--name) @@ -151,7 +156,7 @@ Images using the v2 or later image format have a content-addressable identifier called a digest. As long as the input used to generate the image is unchanged, the digest value is predictable and referenceable. -## PID Settings (--pid) +## PID settings (--pid) --pid="" : Set the PID (Process) Namespace mode for the container, 'host': use the host's PID namespace inside the container @@ -172,7 +177,7 @@ within the container. This command would allow you to use `strace` inside the container on pid 1234 on the host. -## IPC Settings (--ipc) +## IPC settings (--ipc) --ipc="" : Set the IPC mode for the container, 'container:': reuses another container's IPC namespace @@ -375,7 +380,7 @@ This means the daemon will wait for 100 ms, then 200 ms, 400, 800, 1600, and so on until either the `on-failure` limit is hit, or when you `docker stop` or `docker rm -f` the container. -If a container is succesfully restarted (the container is started and runs +If a container is successfully restarted (the container is started and runs for at least 10 seconds), the delay is reset to its default value of 100 ms. You can specify the maximum amount of times Docker will try to restart the @@ -469,6 +474,8 @@ container: -memory-swap="": Total memory limit (memory + swap, format: , where unit = b, k, m or g) -c, --cpu-shares=0: CPU shares (relative weight) --cpuset-cpus="": CPUs in which to allow execution (0-3, 0,1) + --cpuset-mems="": Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. + --cpu-quota=0: Limit the CPU CFS (Completely Fair Scheduler) quota ### Memory constraints @@ -594,6 +601,30 @@ This means processes in container can be executed on cpu 1 and cpu 3. This means processes in container can be executed on cpu 0, cpu 1 and cpu 2. +We can set mems in which to allow execution for containers. Only effective +on NUMA systems. 
+
+Examples:
+
+    $ docker run -ti --cpuset-mems="1,3" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 1 and 3.
+
+    $ docker run -ti --cpuset-mems="0-2" ubuntu:14.04 /bin/bash
+
+This example restricts the processes in the container to only use memory from
+memory nodes 0, 1 and 2.
+
+### CPU quota constraint
+
+The `--cpu-quota` flag limits the container's CPU usage. The default 0 value
+allows the container to take 100% of a CPU resource (1 CPU). The CFS (Completely Fair
+Scheduler) handles resource allocation for executing processes and is the default
+Linux scheduler used by the kernel. Set this value to 50000 to limit the container
+to 50% of a CPU resource. For multiple CPUs, adjust the `--cpu-quota` as necessary.
+For more information, see the [CFS documentation on bandwidth limiting](https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt).
+
 ## Runtime privilege, Linux capabilities, and LXC configuration
 
     --cap-add: Add Linux capabilities
@@ -757,6 +788,10 @@ command is available only for this logging driver
 Syslog logging driver for Docker. Writes log messages to syslog. `docker logs`
 command is not available for this logging driver
 
+#### Logging driver: journald
+
+Journald logging driver for Docker. Writes log messages to journald. `docker logs` command is not available for this logging driver
+
 ## Overriding Dockerfile image defaults
 
 When a developer builds an image from a [*Dockerfile*](/reference/builder)
diff --git a/docs/sources/release-notes.md b/docs/sources/release-notes.md
index fe79d881dc986..1a32cbb98073e 100644
--- a/docs/sources/release-notes.md
+++ b/docs/sources/release-notes.md
@@ -1,75 +1,133 @@
-page_title: Docker 1.x Series Release Notes
-page_description: Release Notes for Docker 1.x.
+page_title: Docker 1.x series release notes
+page_description: Release notes for Docker 1.x.
page_keywords: docker, documentation, about, technology, understanding, release -# Release Notes +# Release notes version 1.6.0 +(2015-04-16) You can view release notes for earlier version of Docker by selecting the -desired version from the drop-down list at the top right of this page. - -## Version 1.5.0 -(2015-02-03) - -For a complete list of patches, fixes, and other improvements, see the -[merge PR on GitHub](https://github.com/docker/docker/pull/10286). - -*New Features* - -* [1.6] The Docker daemon will no longer ignore unknown commands - while processing a `Dockerfile`. Instead it will generate an error and halt - processing. -* The Docker daemon has now supports for IPv6 networking between containers - and on the `docker0` bridge. For more information see the - [IPv6 networking reference](/articles/networking/#ipv6). -* Docker container filesystems can now be set to`--read-only`, restricting your - container to writing to volumes [PR# 10093](https://github.com/docker/docker/pull/10093). -* A new `docker stats CONTAINERID` command has been added to allow users to view a - continuously updating stream of container resource usage statistics. See the - [`stats` command line reference](/reference/commandline/cli/#stats) and the - [container `stats` API reference](/reference/api/docker_remote_api_v1.17/#get-container-stats-based-on-resource-usage). - **Note**: this feature is only enabled for the `libcontainer` exec-driver at this point. -* Users can now specify the file to use as the `Dockerfile` by running - `docker build -f alternate.dockerfile .`. This will allow the definition of multiple - `Dockerfile`s for a single project. See the [`docker build` command reference]( -/reference/commandline/cli/#build) for more information. -* The v1 Open Image specification has been created to document the current Docker image - format and metadata. 
Please see [the Open Image specification document]( -https://github.com/docker/docker/blob/master/image/spec/v1.md) for more details. -* This release also includes a number of significant performance improvements in - build and image management ([PR #9720](https://github.com/docker/docker/pull/9720), - [PR #8827](https://github.com/docker/docker/pull/8827)) -* The `docker inspect` command now lists ExecIDs generated for each `docker exec` process. - See [PR #9800](https://github.com/docker/docker/pull/9800)) for more details. -* The `docker inspect` command now shows the number of container restarts when there - is a restart policy ([PR #9621](https://github.com/docker/docker/pull/9621)) -* This version of Docker is built using Go 1.4 - -> **Note:** -> Development history prior to version 1.0 can be found by -> searching in the [Docker GitHub repo](https://github.com/docker/docker). - -## Known Issues - -This section lists significant known issues present in Docker as of release -date. It is not exhaustive; it lists only issues with potentially significant -impact on users. This list will be updated as issues are resolved. - -* **Unexpected File Permissions in Containers** -An idiosyncrasy in AUFS prevents permissions from propagating predictably -between upper and lower layers. This can cause issues with accessing private -keys, database instances, etc. - -For systems that have recent aufs version (i.e., `dirperm1` mount option can -be set), docker will attempt to fix the issue automatically by mounting -the layers with `dirperm1` option. More details on `dirperm1` option can be -found at [`aufs` man page](http://aufs.sourceforge.net/aufs3/man.html) - -For complete information and workarounds see +desired version from the drop-down list at the top right of this page. For the +formal release announcement, see [the Docker +blog](https://blog.docker.com/2015/04/docker-release-1-6/). 
+ + + +## Docker Engine 1.6.0 features + +For a complete list of engine patches, fixes, and other improvements, see the +[merge PR on GitHub](https://github.com/docker/docker/pull/11635). You'll also +find [a changelog in the project +repository](https://github.com/docker/docker/blob/master/CHANGELOG.md). + + +| Feature | Description | +|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Container and Image Labels | Labels allow you to attach user-defined metadata to containers and images that can be used by your tools. For additional information on using labels, see [Apply custom metadata](http://docs.docker.com/userguide/labels-custom-metadata/#add-labels-to-images-the-label-instruction) in the documentation. | +| Windows Client preview | The Windows Client can be used just like the Mac OS X client is today with a remote host. Our testing infrastructure was scaled out to accommodate Windows Client testing on every PR to the Engine. See the Azure blog for [details on using this new client](http://azure.microsoft.com/blog/2015/04/16/docker-client-for-windows-is-now-available). | +| Logging drivers | The new logging driver follows the exec driver and storage driver concepts already available in Engine today. There is a new option `--log-driver` to `docker run` command. See the `run` reference for a [description on how to use this option](http://docs.docker.com/reference/run/#logging-drivers-log-driver). 
| +| Image digests | When you pull, build, or run images, you specify them in the form `namespace/repository:tag`, or even just `repository`. In this release, you are now able to pull, run, build and refer to images by a new content addressable identifier called a “digest” with the syntax `namespace/repo@digest`. See the the command line reference for [examples of using the digest](http://docs.docker.com/reference/commandline/cli/#listing-image-digests). | +| Custom cgroups | Containers are made from a combination of namespaces, capabilities, and cgroups. Docker already supports custom namespaces and capabilities. Additionally, in this release we’ve added support for custom cgroups. Using the `--cgroup-parent` flag, you can pass a specific `cgroup` to run a container in. See [the command line reference for more information](http://docs.docker.com/reference/commandline/cli/#create). | +| Ulimits | You can now specify the default `ulimit` settings for all containers when configuring the daemon. For example:`docker -d --default-ulimit nproc=1024:2048` See [Default Ulimits](http://docs.docker.com/reference/commandline/cli/#default-ulimits) in this documentation. | +| Commit and import Dockerfile | You can now make changes to images on the fly without having to re-build the entire image. The feature `commit --change` and `import --change` allows you to apply standard changes to a new image. These are expressed in the Dockerfile syntax and used to modify the image. For details on how to use these, see the [commit](http://docs.docker.com/reference/commandline/cli/#commit) and [import](http://docs.docker.com/reference/commandline/cli/#import). | + +### Known issues in Engine + +This section lists significant known issues present in Docker as of release date. +For an exhaustive list of issues, see [the issues list on the project +repository](https://github.com/docker/docker/issues/). 
+
+* *Unexpected File Permissions in Containers*
+An idiosyncrasy in AUFS prevented permissions from propagating predictably
+between upper and lower layers. This caused issues with accessing private
+keys, database instances, etc. This issue was closed in this release:
 [Github Issue 783](https://github.com/docker/docker/issues/783).
-* **Docker Hub incompatible with Safari 8**
-Docker Hub has multiple issues displaying on Safari 8, the default browser
-for OS X 10.10 (Yosemite). Users should access the hub using a different
-browser. Most notably, changes in the way Safari handles cookies means that the
-user is repeatedly logged out. For more information, see the [Docker
-forum post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300).
+
+* *Docker Hub incompatible with Safari 8*
+Docker Hub had multiple issues displaying on Safari 8, the default browser for
+OS X 10.10 (Yosemite). Most notably, changes in the way Safari handled cookies
+means that the user was repeatedly logged out.
+Recently, Safari fixed the bug that was causing all the issues. Upgrade
+to Safari 8.0.5, which was just released last week, and see if that fixes your
+issues. You might have to flush your cookies if it doesn't work right away.
+For more information, see the [Docker forum
+post](https://forums.docker.com/t/new-safari-in-yosemite-issue/300).
+
+## Docker Registry 2.0 features
+
+This release includes Registry 2.0. The Docker Registry is a central server for
+pushing and pulling images. In this release, it was completely rewritten in Go
+around a new set of distribution APIs.
+
+- **Webhook notifications**: You can now configure the Registry to send Webhooks
+when images are pushed. Spin off a CI build, send a notification to IRC –
+whatever you want! Included in the documentation is a detailed [notification
+specification](http://docs.docker.com/registry/notifications/).
+
+- **Native TLS support**: This release makes it easier to secure a registry with
+TLS.
This documentation includes [expanded examples of secure +deployments](http://docs.docker.com/registry/deploying/). + +- **New Distribution APIs**: This release includes an expanded set of new +distribution APIs. You can read the [detailed specification +here](http://docs.docker.com/registry/spec/api/). + + +## Docker Compose 1.2 + +For a complete list of compose patches, fixes, and other improvements, see the +[changelog in the project +repository](https://github.com/docker/compose/blob/master/CHANGES.md). The +project also makes a [set of release +notes](https://github.com/docker/compose/releases/tag/1.2.0) on the project. + +- **extends**: You can use `extends` to share configuration between services +with the keyword “extends”. With extends, you can refer to a service defined +elsewhere and include its configuration in a locally-defined service, while also +adding or overriding configuration as necessary. The documentation describes +[how to use extends in your +configuration](http://docs.docker.com/compose/extends/#extending-services-in- +compose). + +- **Relative directory handling may cause breaking change**: Compose now treats +directories passed to build, filenames passed to `env_file` and volume host +paths passed to volumes as relative to the configuration file's directory. +Previously, they were treated as relative to the directory where you were +running `docker-compose`. In the majority of cases, the location of the +configuration file and where you ran `docker-compose` were the same directory. +Now, you can use the `-f|--file` argument to specify a configuration file in +another directory. + + +## Docker Swarm 0.2 + +You'll find the [release for download on +GitHub](https://github.com/docker/swarm/releases/tag/v0.2.0) and [the +documentation here](http://docs.docker.com/swarm/). 
This release includes the +following features: + +- **Spread strategy**: A new strategy for scheduling containers on your cluster +which evenly spreads them over available nodes. +- **More Docker commands supported**: More progress has been made towards +supporting the complete Docker API, such as pulling and inspecting images. +- **Clustering drivers**: There are not any third-party drivers yet, but the +first steps have been made towards making a pluggable driver interface that will +make it possible to use Swarm with clustering systems such as Mesos. + + +## Docker Machine 0.2 Pre-release + +You'll find the [release for download on +GitHub](https://github.com/docker/machine/releases) and [the documentation +here](http://docs.docker.com/machine/). For a complete list of machine changes +see [the changelog in the project +repository](https://github.com/docker/machine/blob/master/CHANGES.md#020-2015-03 +-22). + +- **Cleaner driver interface**: It is now much easier to write drivers for providers. +- **More reliable and consistent provisioning**: Provisioning servers is now +handled centrally by Machine instead of letting each driver individually do it. +- **Regenerate TLS certificates**: A new command has been added to regenerate a +host’s TLS certificates for good security practice and for if a host’s IP +address changes. + diff --git a/docs/sources/terms/container.md b/docs/sources/terms/container.md index 8b42868788eef..d0c31c2455058 100644 --- a/docs/sources/terms/container.md +++ b/docs/sources/terms/container.md @@ -17,7 +17,7 @@ Image*](/terms/image) and some additional information like its unique id, networking configuration, and resource limits is called a **container**. -## Container State +## Container state Containers can change, and so they have state. A container may be **running** or **exited**. 
diff --git a/docs/sources/terms/filesystem.md b/docs/sources/terms/filesystem.md index 5587e3c83179d..814246d8b998f 100644 --- a/docs/sources/terms/filesystem.md +++ b/docs/sources/terms/filesystem.md @@ -1,8 +1,8 @@ -page_title: File Systems +page_title: File system page_description: How Linux organizes its persistent storage page_keywords: containers, files, linux -# File System +# File system ## Introduction diff --git a/docs/sources/terms/image.md b/docs/sources/terms/image.md index e42a6cfa12162..0a11d91c9ec90 100644 --- a/docs/sources/terms/image.md +++ b/docs/sources/terms/image.md @@ -1,4 +1,4 @@ -page_title: Images +page_title: Image page_description: Definition of an image page_keywords: containers, lxc, concepts, explanation, image, container @@ -19,7 +19,7 @@ images do not have state. ![](/terms/images/docker-filesystems-debianrw.png) -## Parent Image +## Parent image ![](/terms/images/docker-filesystems-multilayer.png) @@ -27,7 +27,7 @@ Each image may depend on one more image which forms the layer beneath it. We sometimes say that the lower image is the **parent** of the upper image. -## Base Image +## Base image An image that has no parent is a **base image**. diff --git a/docs/sources/terms/registry.md b/docs/sources/terms/registry.md index 68120812c37ea..ad5a81d640600 100644 --- a/docs/sources/terms/registry.md +++ b/docs/sources/terms/registry.md @@ -14,7 +14,7 @@ The default registry can be accessed using a browser at [Docker Hub](https://hub.docker.com) or using the `docker search` command. 
-## Further Reading +## Further reading For more information see [*Working with Repositories*](/userguide/dockerrepos/#working-with-the-repository) diff --git a/docs/sources/terms/repository.md b/docs/sources/terms/repository.md index 84963b4bf94a1..4b8579924fbf3 100644 --- a/docs/sources/terms/repository.md +++ b/docs/sources/terms/repository.md @@ -29,7 +29,7 @@ A Fully Qualified Image Name (FQIN) can be made up of 3 parts: If you create a new repository which you want to share, you will need to set at least the `user_name`, as the `default` blank `user_name` prefix is -reserved for official Docker images. +reserved for [Official Repositories](/docker-hub/official_repos). For more information see [*Working with Repositories*](/userguide/dockerrepos/#working-with-the-repository) diff --git a/docs/sources/userguide/dockerhub.md b/docs/sources/userguide/dockerhub.md index 3d4007d3025c9..2f7170d64f9f0 100644 --- a/docs/sources/userguide/dockerhub.md +++ b/docs/sources/userguide/dockerhub.md @@ -2,7 +2,7 @@ page_title: Getting started with Docker Hub page_description: Introductory guide to getting an account on Docker Hub page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, central service, services, how to, container, containers, automation, collaboration, collaborators, registry, repo, repository, technology, github webhooks, trusted builds -# Getting Started with Docker Hub +# Getting started with Docker Hub This section provides a quick introduction to the [Docker Hub](https://hub.docker.com), @@ -21,7 +21,7 @@ most out of Docker. To do this, it provides services such as: In order to use Docker Hub, you will first need to register and create an account. Don't worry, creating an account is simple and free. 
-## Creating a Docker Hub Account +## Creating a Docker Hub account There are two ways for you to register and create an account: diff --git a/docs/sources/userguide/dockerimages.md b/docs/sources/userguide/dockerimages.md index f97231506e3f1..c29b01032c94b 100644 --- a/docs/sources/userguide/dockerimages.md +++ b/docs/sources/userguide/dockerimages.md @@ -1,8 +1,8 @@ -page_title: Working with Docker Images +page_title: Working with Docker images page_description: How to work with Docker images. page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration -# Working with Docker Images +# Working with Docker images In the [introduction](/introduction/understanding-docker/) we've discovered that Docker images are the basis of containers. In the @@ -131,11 +131,11 @@ term `sinatra`. We can see we've returned a lot of images that use the term `sinatra`. We've returned a list of image names, descriptions, Stars (which measure the social popularity of images - if a user likes an image then they can "star" it), and -the Official and Automated build statuses. Official repositories are built and -maintained by the [Stackbrew](https://github.com/docker/stackbrew) project, -and Automated repositories are [Automated Builds]( -/userguide/dockerrepos/#automated-builds) that allow you to validate the source -and content of an image. +the Official and Automated build statuses. +[Official Repositories](/docker-hub/official_repos) are a carefully curated set +of Docker repositories supported by Docker, Inc. Automated repositories are +[Automated Builds](/userguide/dockerrepos/#automated-builds) that allow you to +validate the source and content of an image. We've reviewed the images available to use and we decided to use the `training/sinatra` image. 
So far we've seen two types of images repositories, @@ -505,6 +505,25 @@ Let's see our new tag using the `docker images` command. ouruser/sinatra devel 5db5f8471261 11 hours ago 446.7 MB ouruser/sinatra v2 5db5f8471261 11 hours ago 446.7 MB +## Image Digests + +Images that use the v2 or later format have a content-addressable identifier +called a `digest`. As long as the input used to generate the image is +unchanged, the digest value is predictable. To list image digest values, use +the `--digests` flag: + + $ docker images --digests | head + REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE + ouruser/sinatra latest sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf 5db5f8471261 11 hours ago 446.7 MB + +When pushing or pulling to a 2.0 registry, the `push` or `pull` command +output includes the image digest. You can `pull` using a digest value. + + $ docker pull ouruser/sinatra@cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf + +You can also reference by digest in `create`, `run`, and `rmi` commands, as well as the +`FROM` image reference in a Dockerfile. + ## Push an image to Docker Hub Once you've built or created a new image you can push it to [Docker diff --git a/docs/sources/userguide/dockerizing.md b/docs/sources/userguide/dockerizing.md index 5896dd78e0a1f..7124ba6c9ca1c 100644 --- a/docs/sources/userguide/dockerizing.md +++ b/docs/sources/userguide/dockerizing.md @@ -1,8 +1,8 @@ -page_title: Dockerizing Applications: A "Hello world" +page_title: Dockerizing applications: A "Hello world" page_description: A simple "Hello world" exercise that introduced you to Docker. 
page_keywords: docker guide, docker, docker platform, virtualization framework, how to, dockerize, dockerizing apps, dockerizing applications, container, containers -# Dockerizing Applications: A "Hello world" +# Dockerizing applications: A "Hello world" *So what's this Docker thing all about?* @@ -48,7 +48,7 @@ So what happened to our container after that? Well Docker containers only run as long as the command you specify is active. Here, as soon as `Hello world` was echoed, the container stopped. -## An Interactive Container +## An interactive container Let's try the `docker run` command again, this time specifying a new command to run in our container. @@ -90,7 +90,7 @@ use the `exit` command or enter Ctrl-D to finish. As with our previous container, once the Bash shell process has finished, the container is stopped. -## A Daemonized Hello world +## A daemonized Hello world Now a container that runs a command and then exits has some uses but it's not overly helpful. Let's create a container that runs as a daemon, diff --git a/docs/sources/userguide/dockerlinks.md b/docs/sources/userguide/dockerlinks.md index 66dd3d7a4ee67..8a20388463cfd 100644 --- a/docs/sources/userguide/dockerlinks.md +++ b/docs/sources/userguide/dockerlinks.md @@ -1,8 +1,8 @@ -page_title: Linking Containers Together +page_title: Linking containers together page_description: Learn how to connect Docker containers together. page_keywords: Examples, Usage, user guide, links, linking, docker, documentation, examples, names, name, container naming, port, map, network port, network -# Linking Containers Together +# Linking containers together In [the Using Docker section](/userguide/usingdocker), you saw how you can connect to a service running inside a Docker container via a network @@ -11,7 +11,7 @@ applications running inside Docker containers. In this section, we'll briefly re connecting via a network port and then we'll introduce you to another method of access: container linking. 
-## Connect using Network port mapping +## Connect using network port mapping In [the Using Docker section](/userguide/usingdocker), you created a container that ran a Python Flask application: @@ -175,7 +175,7 @@ recipient container in two ways: * Environment variables, * Updating the `/etc/hosts` file. -### Environment Variables +### Environment variables Docker creates several environment variables when you link containers. Docker automatically creates environment variables in the target container based on diff --git a/docs/sources/userguide/dockerrepos.md b/docs/sources/userguide/dockerrepos.md index a8a1800f5129b..8fc2ba637fdfb 100644 --- a/docs/sources/userguide/dockerrepos.md +++ b/docs/sources/userguide/dockerrepos.md @@ -51,12 +51,12 @@ name, user name, or description: tianon/centos CentOS 5 and 6, created using rinse instea... 21 ... -There you can see two example results: `centos` and -`tianon/centos`. The second result shows that it comes from -the public repository of a user, named `tianon/`, while the first result, -`centos`, doesn't explicitly list a repository which means that it comes from the -trusted top-level namespace. The `/` character separates a user's -repository from the image name. +There you can see two example results: `centos` and `tianon/centos`. The second +result shows that it comes from the public repository of a user, named +`tianon/`, while the first result, `centos`, doesn't explicitly list a +repository which means that it comes from the trusted top-level namespace for +[Official Repositories](/docker-hub/official_repos). The `/` character separates +a user's repository from the image name. Once you've found the image you want, you can download it with `docker pull `: @@ -101,7 +101,7 @@ information [here](http://docs.docker.com/docker-hub/). * Automated Builds * Webhooks -### Private Repositories +### Private repositories Sometimes you have images you don't want to make public and share with everyone. 
So Docker Hub allows you to have private repositories. You can @@ -150,7 +150,7 @@ repository. You can create multiple Automated Builds per repository and configure them to point to specific `Dockerfile`'s or Git branches. -#### Build Triggers +#### Build triggers Automated Builds can also be triggered via a URL on Docker Hub. This allows you to rebuild an Automated build image on demand. diff --git a/docs/sources/userguide/dockervolumes.md b/docs/sources/userguide/dockervolumes.md index c319ecee5cdf9..c7126d7c33796 100644 --- a/docs/sources/userguide/dockervolumes.md +++ b/docs/sources/userguide/dockervolumes.md @@ -1,8 +1,8 @@ -page_title: Managing Data in Containers +page_title: Managing data in containers page_description: How to manage data inside your Docker containers. page_keywords: Examples, Usage, volume, docker, documentation, user guide, data, volumes -# Managing Data in Containers +# Managing data in containers So far we've been introduced to some [basic Docker concepts](/userguide/usingdocker/), seen how to work with [Docker @@ -25,8 +25,8 @@ System*](/terms/layer/#union-file-system). Data volumes provide several useful features for persistent or shared data: - Volumes are initialized when a container is created. If the container's - base image contains data at the specified mount point, that data is - copied into the new volume. + base image contains data at the specified mount point, that existing data is + copied into the new volume upon volume initialization. - Data volumes can be shared and reused among containers. - Changes to a data volume are made directly. - Changes to a data volume will not be included when you update an image. @@ -73,7 +73,7 @@ volumes. The output should look something similar to the following: You will notice in the above 'Volumes' is specifying the location on the host and 'VolumesRW' is specifying that the volume is read/write. 
-### Mount a Host Directory as a Data Volume +### Mount a host directory as a data volume In addition to creating a volume using the `-v` flag you can also mount a directory from your Docker daemon's host into a container. @@ -116,7 +116,7 @@ read-only. Here we've mounted the same `/src/webapp` directory but we've added the `ro` option to specify that the mount should be read-only. -### Mount a Host File as a Data Volume +### Mount a host file as a data volume The `-v` flag can also be used to mount a single file - instead of *just* directories - from the host machine. @@ -134,7 +134,7 @@ history of the commands typed while in the container. > you want to edit the mounted file, it is often easiest to instead mount the > parent directory. -## Creating and mounting a Data Volume Container +## Creating and mounting a data volume container If you have some persistent data that you want to share between containers, or want to use from non-persistent containers, it's best to diff --git a/docs/sources/userguide/index.md b/docs/sources/userguide/index.md index d0dbdb84ee199..9cc1c6db30312 100644 --- a/docs/sources/userguide/index.md +++ b/docs/sources/userguide/index.md @@ -1,8 +1,8 @@ -page_title: The Docker User Guide -page_description: The Docker User Guide home page +page_title: The Docker user guide +page_description: The Docker user guide home page page_keywords: docker, introduction, documentation, about, technology, docker.io, user, guide, user's, manual, platform, framework, virtualization, home, intro -# Welcome to the Docker User Guide +# Welcome to the Docker user guide In the [Introduction](/) you got a taste of what Docker is and how it works. 
In this guide we're going to take you through the fundamentals of @@ -19,7 +19,7 @@ We’ll teach you how to use Docker to: We've broken this guide into major sections that take you through the Docker life cycle: -## Getting Started with Docker Hub +## Getting started with Docker Hub *How do I use Docker Hub?* @@ -29,7 +29,7 @@ environment. To learn more: Go to [Using Docker Hub](/userguide/dockerhub). -## Dockerizing Applications: A "Hello world" +## Dockerizing applications: A "Hello world" *How do I run applications inside containers?* @@ -38,7 +38,7 @@ applications. To learn how to Dockerize applications and run them: Go to [Dockerizing Applications](/userguide/dockerizing). -## Working with Containers +## Working with containers *How do I manage my containers?* @@ -48,7 +48,7 @@ about how to inspect, monitor and manage containers: Go to [Working With Containers](/userguide/usingdocker). -## Working with Docker Images +## Working with Docker images *How can I access, share and build my own images?* @@ -57,7 +57,7 @@ learn how to build your own application images with Docker. Go to [Working with Docker Images](/userguide/dockerimages). -## Linking Containers Together +## Linking containers together Until now we've seen how to build individual applications inside Docker containers. Now learn how to build whole application stacks with Docker @@ -65,7 +65,7 @@ by linking together multiple Docker containers. Go to [Linking Containers Together](/userguide/dockerlinks). -## Managing Data in Containers +## Managing data in containers Now we know how to link Docker containers together the next step is learning how to manage data, volumes and mounts inside our containers. 
diff --git a/docs/sources/userguide/level1.md b/docs/sources/userguide/level1.md index cca77dc36291d..320fbfee01be3 100644 --- a/docs/sources/userguide/level1.md +++ b/docs/sources/userguide/level1.md @@ -1,10 +1,10 @@ -page_title: Docker Images Test +page_title: Docker images test page_description: How to work with Docker images. page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration Back -# Dockerfile Tutorial +# Dockerfile tutorial ## Test your Dockerfile knowledge - Level 1 diff --git a/docs/sources/userguide/level2.md b/docs/sources/userguide/level2.md index fe6654e71f034..96e91a1c6c743 100644 --- a/docs/sources/userguide/level2.md +++ b/docs/sources/userguide/level2.md @@ -1,10 +1,10 @@ -page_title: Docker Images Test +page_title: Docker images test page_description: How to work with Docker images. page_keywords: documentation, docs, the docker guide, docker guide, docker, docker platform, virtualization framework, docker.io, Docker images, Docker image, image management, Docker repos, Docker repositories, docker, docker tag, docker tags, Docker Hub, collaboration Back -#Dockerfile Tutorial +#Dockerfile tutorial ## Test your Dockerfile knowledge - Level 2 diff --git a/docs/sources/userguide/usingdocker.md b/docs/sources/userguide/usingdocker.md index 70996a21004b7..e33ca717d67a6 100644 --- a/docs/sources/userguide/usingdocker.md +++ b/docs/sources/userguide/usingdocker.md @@ -1,8 +1,8 @@ -page_title: Working with Containers +page_title: Working with containers page_description: Learn how to manage and operate Docker containers. 
page_keywords: docker, the docker guide, documentation, docker.io, monitoring containers, docker top, docker inspect, docker port, ports, docker logs, log, Logs -# Working with Containers +# Working with containers In the [last section of the Docker User Guide](/userguide/dockerizing) we launched our first containers. We launched two containers using the @@ -91,7 +91,7 @@ This will display the help text and all available flags: > You can see a full list of Docker's commands > [here](/reference/commandline/cli/). -## Running a Web Application in Docker +## Running a web application in Docker So now we've learnt a bit more about the `docker` client let's move onto the important stuff: running more containers. So far none of the @@ -121,7 +121,7 @@ Lastly, we've specified a command for our container to run: `python app.py`. Thi > reference](/reference/commandline/cli/#run) and the [Docker Run > Reference](/reference/run/). -## Viewing our Web Application Container +## Viewing our web application container Now let's see our running container using the `docker ps` command. @@ -189,7 +189,7 @@ Our Python application is live! > > In this case you'd browse to http://192.168.59.103:49155 for the above example. -## A Network Port Shortcut +## A network port shortcut Using the `docker ps` command to return the mapped port is a bit clumsy so Docker has a useful shortcut we can use: `docker port`. To use `docker port` we @@ -202,7 +202,7 @@ corresponding public-facing port. In this case we've looked up what port is mapped externally to port 5000 inside the container. -## Viewing the Web Application's Logs +## Viewing the web application's logs Let's also find out a bit more about what's happening with our application and use another of the commands we've learnt, `docker logs`. @@ -217,7 +217,7 @@ logs` command to act like the `tail -f` command and watch the container's standard out. 
We can see here the logs from Flask showing the application running on port 5000 and the access log entries for it. -## Looking at our Web Application Container's processes +## Looking at our web application container's processes In addition to the container's logs we can also examine the processes running inside it using the `docker top` command. @@ -229,7 +229,7 @@ running inside it using the `docker top` command. Here we can see our `python app.py` command is the only process running inside the container. -## Inspecting our Web Application Container +## Inspecting our web application container Lastly, we can take a low-level dive into our Docker container using the `docker inspect` command. It returns a JSON hash of useful configuration @@ -258,7 +258,7 @@ specific element, for example to return the container's IP address we would: $ docker inspect -f '{{ .NetworkSettings.IPAddress }}' nostalgic_morse 172.17.0.5 -## Stopping our Web Application Container +## Stopping our web application container Okay we've seen web application working. Now let's stop it using the `docker stop` command and the name of our container: `nostalgic_morse`. @@ -271,7 +271,7 @@ been stopped. $ docker ps -l -## Restarting our Web Application Container +## Restarting our web application container Oops! Just after you stopped the container you get a call to say another developer needs the container back. From here you have two choices: you @@ -289,7 +289,7 @@ responds. > Also available is the `docker restart` command that runs a stop and > then start on the container. -## Removing our Web Application Container +## Removing our web application container Your colleague has let you know that they've now finished with the container and won't need it again. So let's remove it using the `docker rm` command. 
diff --git a/engine/engine.go b/engine/engine.go index 1090675dfa557..79fae51cc3848 100644 --- a/engine/engine.go +++ b/engine/engine.go @@ -11,7 +11,7 @@ import ( "time" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/stringid" ) // Installer is a standard interface for objects which can "install" themselves @@ -78,7 +78,7 @@ func (eng *Engine) RegisterCatchall(catchall Handler) { func New() *Engine { eng := &Engine{ handlers: make(map[string]Handler), - id: stringutils.GenerateRandomString(), + id: stringid.GenerateRandomID(), Stdout: os.Stdout, Stderr: os.Stderr, Stdin: os.Stdin, diff --git a/engine/env.go b/engine/env.go index c6c673271e9f0..107ae4a0d9160 100644 --- a/engine/env.go +++ b/engine/env.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/docker/docker/utils" + "github.com/docker/docker/pkg/ioutils" ) type Env []string @@ -210,7 +210,7 @@ func (env *Env) SetAuto(k string, v interface{}) { // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. - // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) + // (See https://golang.org/src/pkg/encoding/json/decode.go#L46) if fval, ok := v.(float64); ok { env.SetInt64(k, int64(fval)) } else if sval, ok := v.(string); ok { @@ -245,7 +245,7 @@ func (env *Env) Encode(dst io.Writer) error { if err := json.Unmarshal([]byte(v), &val); err == nil { // FIXME: we fix-convert float values to int, because // encoding/json decodes integers to float64, but cannot encode them back. 
- // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) + // (See https://golang.org/src/pkg/encoding/json/decode.go#L46) m[k] = changeFloats(val) } else { m[k] = v @@ -258,7 +258,7 @@ func (env *Env) Encode(dst io.Writer) error { } func (env *Env) WriteTo(dst io.Writer) (int64, error) { - wc := utils.NewWriteCounter(dst) + wc := ioutils.NewWriteCounter(dst) err := env.Encode(wc) return wc.Count, err } diff --git a/engine/shutdown_test.go b/engine/shutdown_test.go index cde177e398d6f..d2ef0339de399 100644 --- a/engine/shutdown_test.go +++ b/engine/shutdown_test.go @@ -18,9 +18,7 @@ func TestShutdownEmpty(t *testing.T) { func TestShutdownAfterRun(t *testing.T) { eng := New() - var called bool eng.Register("foo", func(job *Job) error { - called = true return nil }) if err := eng.Job("foo").Run(); err != nil { diff --git a/engine/streams_test.go b/engine/streams_test.go index 476a721baf71e..c22338a32e752 100644 --- a/engine/streams_test.go +++ b/engine/streams_test.go @@ -182,7 +182,7 @@ func TestInputAddEmpty(t *testing.T) { t.Fatal(err) } if len(data) > 0 { - t.Fatalf("Read from empty input shoul yield no data") + t.Fatalf("Read from empty input should yield no data") } } diff --git a/graph/export.go b/graph/export.go index f689ba10efbe0..ae061a8a0fdc8 100644 --- a/graph/export.go +++ b/graph/export.go @@ -2,14 +2,12 @@ package graph import ( "encoding/json" - "fmt" "io" "io/ioutil" "os" "path" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/registry" @@ -20,10 +18,13 @@ import ( // uncompressed tar ball. // name is the set of tags to export. // out is the writer where the images are written to. 
-func (s *TagStore) CmdImageExport(job *engine.Job) error { - if len(job.Args) < 1 { - return fmt.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name) - } +type ImageExportConfig struct { + Names []string + Outstream io.Writer +} + +func (s *TagStore) ImageExport(imageExportConfig *ImageExportConfig) error { + // get image json tempdir, err := ioutil.TempDir("", "docker-export-") if err != nil { @@ -40,7 +41,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) error { repo[tag] = id } } - for _, name := range job.Args { + for _, name := range imageExportConfig.Names { name = registry.NormalizeLocalName(name) logrus.Debugf("Serializing %s", name) rootRepo := s.Repositories[name] @@ -48,7 +49,7 @@ func (s *TagStore) CmdImageExport(job *engine.Job) error { // this is a base repo name, like 'busybox' for tag, id := range rootRepo { addKey(name, tag, id) - if err := s.exportImage(job.Eng, id, tempdir); err != nil { + if err := s.exportImage(id, tempdir); err != nil { return err } } @@ -67,13 +68,13 @@ func (s *TagStore) CmdImageExport(job *engine.Job) error { if len(repoTag) > 0 { addKey(repoName, repoTag, img.ID) } - if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil { + if err := s.exportImage(img.ID, tempdir); err != nil { return err } } else { // this must be an ID that didn't get looked up just right? 
- if err := s.exportImage(job.Eng, name, tempdir); err != nil { + if err := s.exportImage(name, tempdir); err != nil { return err } } @@ -82,8 +83,15 @@ func (s *TagStore) CmdImageExport(job *engine.Job) error { } // write repositories, if there is something to write if len(rootRepoMap) > 0 { - rootRepoJson, _ := json.Marshal(rootRepoMap) - if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil { + f, err := os.OpenFile(path.Join(tempdir, "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + f.Close() + return err + } + if err := json.NewEncoder(f).Encode(rootRepoMap); err != nil { + return err + } + if err := f.Close(); err != nil { return err } } else { @@ -96,15 +104,15 @@ func (s *TagStore) CmdImageExport(job *engine.Job) error { } defer fs.Close() - if _, err := io.Copy(job.Stdout, fs); err != nil { + if _, err := io.Copy(imageExportConfig.Outstream, fs); err != nil { return err } - logrus.Debugf("End export job: %s", job.Name) + logrus.Debugf("End export image") return nil } // FIXME: this should be a top-level function, not a class method -func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error { +func (s *TagStore) exportImage(name, tempdir string) error { for n := name; n != ""; { // temporary directory tmpImageDir := path.Join(tempdir, n) @@ -127,31 +135,33 @@ func (s *TagStore) exportImage(eng *engine.Engine, name, tempdir string) error { if err != nil { return err } - job := eng.Job("image_inspect", n) - job.SetenvBool("raw", true) - job.Stdout.Add(json) - if err := job.Run(); err != nil { + imageInspectRaw, err := s.LookupRaw(n) + if err != nil { return err } + written, err := json.Write(imageInspectRaw) + if err != nil { + return err + } + if written != len(imageInspectRaw) { + logrus.Warnf("%d bytes have been written instead of the expected %d bytes", written, len(imageInspectRaw)) + } // serialize filesystem fsTar, err := 
os.Create(path.Join(tmpImageDir, "layer.tar")) if err != nil { return err } - job = eng.Job("image_tarlayer", n) - job.Stdout.Add(fsTar) - if err := job.Run(); err != nil { + if err := s.ImageTarLayer(n, fsTar); err != nil { return err } // find parent - job = eng.Job("image_get", n) - info, _ := job.Stdout.AddEnv() - if err := job.Run(); err != nil { + img, err := s.LookupImage(n) + if err != nil { return err } - n = info.Get("Parent") + n = img.Parent } return nil } diff --git a/graph/graph.go b/graph/graph.go index 087a6f093cede..9b2d7c2ee9634 100644 --- a/graph/graph.go +++ b/graph/graph.go @@ -25,7 +25,6 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/truncindex" "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" ) // A Graph is a store for versioned filesystem images and the relationship between them. @@ -154,7 +153,7 @@ func (graph *Graph) Register(img *image.Image, layerData archive.ArchiveReader) graph.driver.Remove(img.ID) } }() - if err := utils.ValidateID(img.ID); err != nil { + if err := image.ValidateID(img.ID); err != nil { return err } // (This is a convenience to save time. 
Race conditions are taken care of by os.Rename) @@ -349,9 +348,8 @@ func (graph *Graph) Delete(name string) error { tmp, err := graph.Mktemp("") graph.idIndex.Delete(id) if err == nil { - err = os.Rename(graph.ImageRoot(id), tmp) - // On err make tmp point to old dir and cleanup unused tmp dir - if err != nil { + if err := os.Rename(graph.ImageRoot(id), tmp); err != nil { + // On err make tmp point to old dir and cleanup unused tmp dir os.RemoveAll(tmp) tmp = graph.ImageRoot(id) } diff --git a/integration/graph_test.go b/graph/graph_test.go similarity index 92% rename from integration/graph_test.go rename to graph/graph_test.go index a481154551f19..81471b6749457 100644 --- a/integration/graph_test.go +++ b/graph/graph_test.go @@ -1,4 +1,4 @@ -package docker +package graph import ( "errors" @@ -11,11 +11,8 @@ import ( "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/graph" "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/utils" ) func TestMount(t *testing.T) { @@ -48,6 +45,7 @@ func TestMount(t *testing.T) { if _, err := driver.Get(image.ID, ""); err != nil { t.Fatal(err) } + } func TestInit(t *testing.T) { @@ -103,7 +101,7 @@ func TestGraphCreate(t *testing.T) { if err != nil { t.Fatal(err) } - if err := utils.ValidateID(img.ID); err != nil { + if err := image.ValidateID(img.ID); err != nil { t.Fatal(err) } if img.Comment != "Testing" { @@ -167,18 +165,6 @@ func TestDeletePrefix(t *testing.T) { assertNImages(graph, t, 0) } -func createTestImage(graph *graph.Graph, t *testing.T) *image.Image { - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) - if err != nil { - t.Fatal(err) - } - return img -} - func TestDelete(t *testing.T) { graph, _ := tempGraph(t) defer nukeGraph(graph) @@ -278,11 +264,19 @@ func TestByParent(t 
*testing.T) { } } -/* - * HELPER FUNCTIONS - */ +func createTestImage(graph *Graph, t *testing.T) *image.Image { + archive, err := fakeTar() + if err != nil { + t.Fatal(err) + } + img, err := graph.Create(archive, "", "", "Test image", "", nil, nil) + if err != nil { + t.Fatal(err) + } + return img +} -func assertNImages(graph *graph.Graph, t *testing.T, n int) { +func assertNImages(graph *Graph, t *testing.T, n int) { if images, err := graph.Map(); err != nil { t.Fatal(err) } else if actualN := len(images); actualN != n { @@ -290,7 +284,7 @@ func assertNImages(graph *graph.Graph, t *testing.T, n int) { } } -func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { +func tempGraph(t *testing.T) (*Graph, graphdriver.Driver) { tmp, err := ioutil.TempDir("", "docker-graph-") if err != nil { t.Fatal(err) @@ -299,22 +293,14 @@ func tempGraph(t *testing.T) (*graph.Graph, graphdriver.Driver) { if err != nil { t.Fatal(err) } - graph, err := graph.NewGraph(tmp, driver) + graph, err := NewGraph(tmp, driver) if err != nil { t.Fatal(err) } return graph, driver } -func nukeGraph(graph *graph.Graph) { +func nukeGraph(graph *Graph) { graph.Driver().Cleanup() os.RemoveAll(graph.Root) } - -func testArchive(t *testing.T) archive.Archive { - archive, err := fakeTar() - if err != nil { - t.Fatal(err) - } - return archive -} diff --git a/graph/history.go b/graph/history.go index 1290de9a30781..56e759a8ebb02 100644 --- a/graph/history.go +++ b/graph/history.go @@ -1,24 +1,17 @@ package graph import ( - "encoding/json" - "fmt" "strings" "github.com/docker/docker/api/types" - "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/utils" ) -func (s *TagStore) CmdHistory(job *engine.Job) error { - if n := len(job.Args); n != 1 { - return fmt.Errorf("Usage: %s IMAGE", job.Name) - } - name := job.Args[0] +func (s *TagStore) History(name string) ([]*types.ImageHistory, error) { foundImage, err := s.LookupImage(name) if err != nil { - return err + 
return nil, err } lookupMap := make(map[string][]string) @@ -32,22 +25,19 @@ func (s *TagStore) CmdHistory(job *engine.Job) error { } } - history := []types.ImageHistory{} + history := []*types.ImageHistory{} err = foundImage.WalkHistory(func(img *image.Image) error { - history = append(history, types.ImageHistory{ + history = append(history, &types.ImageHistory{ ID: img.ID, Created: img.Created.Unix(), - CreatedBy: strings.Join(img.ContainerConfig.Cmd, " "), + CreatedBy: strings.Join(img.ContainerConfig.Cmd.Slice(), " "), Tags: lookupMap[img.ID], Size: img.Size, + Comment: img.Comment, }) return nil }) - if err = json.NewEncoder(job.Stdout).Encode(history); err != nil { - return err - } - - return nil + return history, err } diff --git a/graph/import.go b/graph/import.go index eb63af0b6046b..50e605c948e1e 100644 --- a/graph/import.go +++ b/graph/import.go @@ -1,40 +1,35 @@ package graph import ( - "bytes" - "encoding/json" - "fmt" + "io" "net/http" "net/url" - "github.com/docker/docker/engine" "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/runconfig" "github.com/docker/docker/utils" ) -func (s *TagStore) CmdImport(job *engine.Job) error { - if n := len(job.Args); n != 2 && n != 3 { - return fmt.Errorf("Usage: %s SRC REPO [TAG]", job.Name) - } +type ImageImportConfig struct { + Changes []string + InConfig io.ReadCloser + Json bool + OutStream io.Writer + ContainerConfig *runconfig.Config +} + +func (s *TagStore) Import(src string, repo string, tag string, imageImportConfig *ImageImportConfig) error { var ( - src = job.Args[0] - repo = job.Args[1] - tag string - sf = streamformatter.NewStreamFormatter(job.GetenvBool("json")) - archive archive.ArchiveReader - resp *http.Response - stdoutBuffer = bytes.NewBuffer(nil) - newConfig runconfig.Config + sf = streamformatter.NewStreamFormatter(imageImportConfig.Json) + 
archive archive.ArchiveReader + resp *http.Response ) - if len(job.Args) > 2 { - tag = job.Args[2] - } if src == "-" { - archive = job.Stdin + archive = imageImportConfig.InConfig } else { u, err := url.Parse(src) if err != nil { @@ -45,14 +40,14 @@ func (s *TagStore) CmdImport(job *engine.Job) error { u.Host = src u.Path = "" } - job.Stdout.Write(sf.FormatStatus("", "Downloading from %s", u)) - resp, err = utils.Download(u.String()) + imageImportConfig.OutStream.Write(sf.FormatStatus("", "Downloading from %s", u)) + resp, err = httputils.Download(u.String()) if err != nil { return err } progressReader := progressreader.New(progressreader.Config{ In: resp.Body, - Out: job.Stdout, + Out: imageImportConfig.OutStream, Formatter: sf, Size: int(resp.ContentLength), NewLines: true, @@ -63,30 +58,17 @@ func (s *TagStore) CmdImport(job *engine.Job) error { archive = progressReader } - buildConfigJob := job.Eng.Job("build_config") - buildConfigJob.Stdout.Add(stdoutBuffer) - buildConfigJob.Setenv("changes", job.Getenv("changes")) - // FIXME this should be remove when we remove deprecated config param - buildConfigJob.Setenv("config", job.Getenv("config")) - - if err := buildConfigJob.Run(); err != nil { - return err - } - if err := json.NewDecoder(stdoutBuffer).Decode(&newConfig); err != nil { - return err - } - - img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, &newConfig) + img, err := s.graph.Create(archive, "", "", "Imported from "+src, "", nil, imageImportConfig.ContainerConfig) if err != nil { return err } // Optionally register the image at REPO/TAG if repo != "" { - if err := s.Set(repo, tag, img.ID, true); err != nil { + if err := s.Tag(repo, tag, img.ID, true); err != nil { return err } } - job.Stdout.Write(sf.FormatStatus("", img.ID)) + imageImportConfig.OutStream.Write(sf.FormatStatus("", img.ID)) logID := img.ID if tag != "" { logID = utils.ImageReference(logID, tag) diff --git a/graph/load.go b/graph/load.go index 
ace222e3fec3f..d978b1ee8e8ce 100644 --- a/graph/load.go +++ b/graph/load.go @@ -4,21 +4,20 @@ package graph import ( "encoding/json" + "io" "io/ioutil" "os" "path" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" "github.com/docker/docker/image" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/utils" ) // Loads a set of images into the repository. This is the complementary of ImageExport. // The input stream is an uncompressed tar ball containing images and metadata. -func (s *TagStore) CmdLoad(job *engine.Job) error { +func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { tmpImageDir, err := ioutil.TempDir("", "docker-import-") if err != nil { return err @@ -42,7 +41,7 @@ func (s *TagStore) CmdLoad(job *engine.Job) error { excludes[i] = k i++ } - if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil { + if err := chrootarchive.Untar(inTar, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil { return err } @@ -53,35 +52,39 @@ func (s *TagStore) CmdLoad(job *engine.Job) error { for _, d := range dirs { if d.IsDir() { - if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil { + if err := s.recursiveLoad(d.Name(), tmpImageDir); err != nil { return err } } } - repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories")) - if err == nil { - repositories := map[string]Repository{} - if err := json.Unmarshal(repositoriesJson, &repositories); err != nil { + reposJSONFile, err := os.Open(path.Join(tmpImageDir, "repo", "repositories")) + if err != nil { + if !os.IsNotExist(err) { return err } + return nil + } + defer reposJSONFile.Close() - for imageName, tagMap := range repositories { - for tag, address := range tagMap { - if err := s.SetLoad(imageName, tag, address, true, job.Stdout); err != nil { - return err - } + repositories := map[string]Repository{} + if 
err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil { + return err + } + + for imageName, tagMap := range repositories { + for tag, address := range tagMap { + if err := s.SetLoad(imageName, tag, address, true, outStream); err != nil { + return err } } - } else if !os.IsNotExist(err) { - return err } return nil } -func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error { - if err := eng.Job("image_get", address).Run(); err != nil { +func (s *TagStore) recursiveLoad(address, tmpImageDir string) error { + if _, err := s.LookupImage(address); err != nil { logrus.Debugf("Loading %s", address) imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json")) @@ -100,7 +103,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string logrus.Debugf("Error unmarshalling json", err) return err } - if err := utils.ValidateID(img.ID); err != nil { + if err := image.ValidateID(img.ID); err != nil { logrus.Debugf("Error validating ID: %s", err) return err } @@ -120,7 +123,7 @@ func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string if img.Parent != "" { if !s.graph.Exists(img.Parent) { - if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil { + if err := s.recursiveLoad(img.Parent, tmpImageDir); err != nil { return err } } diff --git a/graph/load_unsupported.go b/graph/load_unsupported.go index 707534480f8d1..7c515596962e9 100644 --- a/graph/load_unsupported.go +++ b/graph/load_unsupported.go @@ -4,10 +4,9 @@ package graph import ( "fmt" - - "github.com/docker/docker/engine" + "io" ) -func (s *TagStore) CmdLoad(job *engine.Job) error { - return fmt.Errorf("CmdLoad is not supported on this platform") +func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error { + return fmt.Errorf("Load is not supported on this platform") } diff --git a/graph/manifest.go b/graph/manifest.go index 7e9281537e3bd..053a185ba5289 100644 --- 
a/graph/manifest.go +++ b/graph/manifest.go @@ -1,14 +1,13 @@ package graph import ( - "bytes" "encoding/json" "fmt" "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - "github.com/docker/docker/engine" "github.com/docker/docker/registry" + "github.com/docker/docker/trust" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) @@ -18,7 +17,7 @@ import ( // contains no signatures by a trusted key for the name in the manifest, the // image is not considered verified. The parsed manifest object and a boolean // for whether the manifest is verified is returned. -func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) { +func (s *TagStore) loadManifest(manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) { sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures") if err != nil { return nil, false, fmt.Errorf("error parsing payload: %s", err) @@ -69,32 +68,28 @@ func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, var verified bool for _, key := range keys { - job := eng.Job("trust_key_check") - b, err := key.MarshalJSON() - if err != nil { - return nil, false, fmt.Errorf("error marshalling public key: %s", err) - } namespace := manifest.Name if namespace[0] != '/' { namespace = "/" + namespace } - stdoutBuffer := bytes.NewBuffer(nil) - - job.Args = append(job.Args, namespace) - job.Setenv("PublicKey", string(b)) + b, err := key.MarshalJSON() + if err != nil { + return nil, false, fmt.Errorf("error marshalling public key: %s", err) + } // Check key has read/write permission (0x03) - job.SetenvInt("Permission", 0x03) - job.Stdout.Add(stdoutBuffer) - if err = job.Run(); err != nil { - return nil, false, fmt.Errorf("error running key check: %s", err) + v, err := s.trustService.CheckKey(namespace, b, 0x03) + if err != nil { + vErr, ok := err.(trust.NotVerifiedError) + if !ok { + return nil, false, 
fmt.Errorf("error running key check: %s", err) + } + logrus.Debugf("Key check result: %v", vErr) } - result := engine.Tail(stdoutBuffer, 1) - logrus.Debugf("Key check result: %q", result) - if result == "verified" { - verified = true + verified = v + if verified { + logrus.Debug("Key check result: verified") } } - return &manifest, verified, nil } diff --git a/graph/manifest_test.go b/graph/manifest_test.go index 9137041827b33..2702dcaf560aa 100644 --- a/graph/manifest_test.go +++ b/graph/manifest_test.go @@ -135,7 +135,7 @@ func TestManifestTarsumCache(t *testing.T) { if err := store.graph.Register(img, archive); err != nil { t.Fatal(err) } - if err := store.Set(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil { + if err := store.Tag(testManifestImageName, testManifestTag, testManifestImageID, false); err != nil { t.Fatal(err) } diff --git a/graph/pull.go b/graph/pull.go index 13c69858fd0db..c3c064fc5842a 100644 --- a/graph/pull.go +++ b/graph/pull.go @@ -12,7 +12,7 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - "github.com/docker/docker/engine" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/image" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" @@ -21,37 +21,34 @@ import ( "github.com/docker/docker/utils" ) -func (s *TagStore) CmdPull(job *engine.Job) error { - if n := len(job.Args); n != 1 && n != 2 { - return fmt.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name) - } +type ImagePullConfig struct { + Parallel bool + MetaHeaders map[string][]string + AuthConfig *cliconfig.AuthConfig + Json bool + OutStream io.Writer +} +func (s *TagStore) Pull(image string, tag string, imagePullConfig *ImagePullConfig) error { var ( - localName = job.Args[0] - tag string - sf = streamformatter.NewStreamFormatter(job.GetenvBool("json")) - authConfig = ®istry.AuthConfig{} - metaHeaders map[string][]string + sf = 
streamformatter.NewStreamFormatter(imagePullConfig.Json) ) // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := s.registryService.ResolveRepository(localName) + repoInfo, err := s.registryService.ResolveRepository(image) if err != nil { return err } - if len(job.Args) > 1 { - tag = job.Args[1] + if err := validateRepoName(repoInfo.LocalName); err != nil { + return err } - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", &metaHeaders) - c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag)) if err != nil { if c != nil { // Another pull of the same repository is already taking place; just wait for it to finish - job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName)) + imagePullConfig.OutStream.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName)) <-c return nil } @@ -65,7 +62,7 @@ func (s *TagStore) CmdPull(job *engine.Job) error { return err } - r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true) + r, err := registry.NewSession(imagePullConfig.AuthConfig, registry.HTTPRequestFactory(imagePullConfig.MetaHeaders), endpoint, true) if err != nil { return err } @@ -77,14 +74,11 @@ func (s *TagStore) CmdPull(job *engine.Job) error { if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) { if repoInfo.Official { - j := job.Eng.Job("trust_update_base") - if err = j.Run(); err != nil { - logrus.Errorf("error updating trust base graph: %s", err) - } + s.trustService.UpdateBase() } logrus.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName) - if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil { + if err := s.pullV2Repository(r, imagePullConfig.OutStream, repoInfo, tag, sf, imagePullConfig.Parallel); err == 
nil { s.eventsService.Log("pull", logName, "") return nil } else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable { @@ -95,7 +89,7 @@ func (s *TagStore) CmdPull(job *engine.Job) error { } logrus.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName) - if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil { + if err = s.pullRepository(r, imagePullConfig.OutStream, repoInfo, tag, sf, imagePullConfig.Parallel); err != nil { return err } @@ -249,7 +243,7 @@ func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo * if askedTag != "" && tag != askedTag { continue } - if err := s.Set(repoInfo.LocalName, tag, id, true); err != nil { + if err := s.Tag(repoInfo.LocalName, tag, id, true); err != nil { return err } } @@ -379,7 +373,7 @@ type downloadInfo struct { err chan error } -func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool) error { +func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool) error { endpoint, err := r.V2RegistryEndpoint(repoInfo.Index) if err != nil { if repoInfo.Index.Official { @@ -403,14 +397,14 @@ func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out return registry.ErrDoesNotExist } for _, t := range tags { - if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil { + if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil { return err } else if downloaded { layersDownloaded = true } } } else { - if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, tag, sf, parallel, auth); err != nil { + if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, parallel, 
auth); err != nil { return err } else if downloaded { layersDownloaded = true @@ -425,7 +419,7 @@ func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out return nil } -func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) { +func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) { logrus.Debugf("Pulling tag from V2 registry: %q", tag) manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth) @@ -435,7 +429,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri // loadManifest ensures that the manifest payload has the expected digest // if the tag is a digest reference. 
- manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag) + manifest, verified, err := s.loadManifest(manifestBytes, manifestDigest, tag) if err != nil { return false, fmt.Errorf("error verifying manifest: %s", err) } @@ -495,7 +489,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri return err } - r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth) + r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth) if err != nil { return err } @@ -543,8 +537,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri di.err <- downloadFunc(di) }(&downloads[i]) } else { - err := downloadFunc(&downloads[i]) - if err != nil { + if err := downloadFunc(&downloads[i]); err != nil { return false, err } } @@ -554,8 +547,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri for i := len(downloads) - 1; i >= 0; i-- { d := &downloads[i] if d.err != nil { - err := <-d.err - if err != nil { + if err := <-d.err; err != nil { return false, err } } @@ -617,7 +609,7 @@ func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Wri } } else { // only set the repository/tag -> image ID mapping when pulling by tag (i.e. 
not by digest) - if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil { + if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil { return false, err } } diff --git a/graph/push.go b/graph/push.go index a542dbf812c24..1b33288d8fedf 100644 --- a/graph/push.go +++ b/graph/push.go @@ -8,11 +8,11 @@ import ( "io/ioutil" "os" "path" - "strings" "sync" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" + "github.com/docker/distribution/digest" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/image" "github.com/docker/docker/pkg/progressreader" "github.com/docker/docker/pkg/streamformatter" @@ -25,6 +25,14 @@ import ( var ErrV2RegistryUnavailable = errors.New("error v2 registry unavailable") +type ImagePushConfig struct { + MetaHeaders map[string][]string + AuthConfig *cliconfig.AuthConfig + Tag string + Json bool + OutStream io.Writer +} + // Retrieve the all the images to be uploaded in the correct order func (s *TagStore) getImageList(localRepo map[string]string, requestedTag string) ([]string, map[string][]string, error) { var ( @@ -359,8 +367,7 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o logrus.Debugf("Pushing layer: %s", layer.ID) if layer.Config != nil && metadata.Image != layer.ID { - err = runconfig.Merge(&metadata, layer.Config) - if err != nil { + if err := runconfig.Merge(&metadata, layer.Config); err != nil { return err } } @@ -376,13 +383,13 @@ func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, o var exists bool if len(checksum) > 0 { - sumParts := strings.SplitN(checksum, ":", 2) - if len(sumParts) < 2 { - return fmt.Errorf("Invalid checksum: %s", checksum) + dgst, err := digest.ParseDigest(checksum) + if err != nil { + return fmt.Errorf("Invalid checksum %s: %s", checksum, err) } // Call mount blob - exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth) + exists, err = 
r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, dgst, auth) if err != nil { out.Write(sf.FormatProgress(stringid.TruncateID(layer.ID), "Image push failed", nil)) return err @@ -468,7 +475,7 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint * // Send the layer logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size) - if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(), + if err := r.PutV2ImageBlob(endpoint, imageName, dgst, progressreader.New(progressreader.Config{ In: tf, Out: out, @@ -486,15 +493,9 @@ func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint * } // FIXME: Allow to interrupt current push when new push of same image is done. -func (s *TagStore) CmdPush(job *engine.Job) error { - if n := len(job.Args); n != 1 { - return fmt.Errorf("Usage: %s IMAGE", job.Name) - } +func (s *TagStore) Push(localName string, imagePushConfig *ImagePushConfig) error { var ( - localName = job.Args[0] - sf = streamformatter.NewStreamFormatter(job.GetenvBool("json")) - authConfig = ®istry.AuthConfig{} - metaHeaders map[string][]string + sf = streamformatter.NewStreamFormatter(imagePushConfig.Json) ) // Resolve the Repository name from fqn to RepositoryInfo @@ -503,10 +504,6 @@ func (s *TagStore) CmdPush(job *engine.Job) error { return err } - tag := job.Getenv("tag") - job.GetenvJson("authConfig", authConfig) - job.GetenvJson("metaHeaders", &metaHeaders) - if _, err := s.poolAdd("push", repoInfo.LocalName); err != nil { return err } @@ -517,16 +514,18 @@ func (s *TagStore) CmdPush(job *engine.Job) error { return err } - r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, false) + r, err := registry.NewSession(imagePushConfig.AuthConfig, registry.HTTPRequestFactory(imagePushConfig.MetaHeaders), endpoint, false) if err != nil { return err } reposLen := 1 - if tag == "" { + if imagePushConfig.Tag == "" { reposLen = 
len(s.Repositories[repoInfo.LocalName]) } - job.Stdout.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen)) + + imagePushConfig.OutStream.Write(sf.FormatStatus("", "The push refers to a repository [%s] (len: %d)", repoInfo.CanonicalName, reposLen)) + // If it fails, try to get the repository localRepo, exists := s.Repositories[repoInfo.LocalName] if !exists { @@ -534,8 +533,9 @@ func (s *TagStore) CmdPush(job *engine.Job) error { } if repoInfo.Index.Official || endpoint.Version == registry.APIVersion2 { - err := s.pushV2Repository(r, localRepo, job.Stdout, repoInfo, tag, sf) + err := s.pushV2Repository(r, localRepo, imagePushConfig.OutStream, repoInfo, imagePushConfig.Tag, sf) if err == nil { + s.eventsService.Log("push", repoInfo.LocalName, "") return nil } @@ -544,9 +544,10 @@ func (s *TagStore) CmdPush(job *engine.Job) error { } } - if err := s.pushRepository(r, job.Stdout, repoInfo, localRepo, tag, sf); err != nil { + if err := s.pushRepository(r, imagePushConfig.OutStream, repoInfo, localRepo, imagePushConfig.Tag, sf); err != nil { return err } + s.eventsService.Log("push", repoInfo.LocalName, "") return nil } diff --git a/graph/service.go b/graph/service.go index a51d106e1389d..52dde1d980504 100644 --- a/graph/service.go +++ b/graph/service.go @@ -5,163 +5,51 @@ import ( "io" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" - "github.com/docker/docker/image" + "github.com/docker/docker/api/types" ) -func (s *TagStore) Install(eng *engine.Engine) error { - for name, handler := range map[string]engine.Handler{ - "image_set": s.CmdSet, - "tag": s.CmdTag, - "image_get": s.CmdGet, - "image_inspect": s.CmdLookup, - "image_tarlayer": s.CmdTarLayer, - "image_export": s.CmdImageExport, - "history": s.CmdHistory, - "viz": s.CmdViz, - "load": s.CmdLoad, - "import": s.CmdImport, - "pull": s.CmdPull, - "push": s.CmdPush, - } { - if err := eng.Register(name, handler); err != nil { - return 
fmt.Errorf("Could not register %q: %v", name, err) - } +func (s *TagStore) LookupRaw(name string) ([]byte, error) { + image, err := s.LookupImage(name) + if err != nil || image == nil { + return nil, fmt.Errorf("No such image %s", name) } - return nil -} -// CmdSet stores a new image in the graph. -// Images are stored in the graph using 4 elements: -// - A user-defined ID -// - A collection of metadata describing the image -// - A directory tree stored as a tar archive (also called the "layer") -// - A reference to a "parent" ID on top of which the layer should be applied -// -// NOTE: even though the parent ID is only useful in relation to the layer and how -// to apply it (ie you could represent the full directory tree as 'parent_layer + layer', -// it is treated as a top-level property of the image. This is an artifact of early -// design and should probably be cleaned up in the future to simplify the design. -// -// Syntax: image_set ID -// Input: -// - Layer content must be streamed in tar format on stdin. An empty input is -// valid and represents a nil layer. -// -// - Image metadata must be passed in the command environment. -// 'json': a json-encoded object with all image metadata. -// It will be stored as-is, without any encoding/decoding artifacts. -// That is a requirement of the current registry client implementation, -// because a re-encoded json might invalidate the image checksum at -// the next upload, even with functionaly identical content. -func (s *TagStore) CmdSet(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s NAME", job.Name) - } - var ( - imgJSON = []byte(job.Getenv("json")) - layer = job.Stdin - ) - if len(imgJSON) == 0 { - return fmt.Errorf("mandatory key 'json' is not set") - } - // We have to pass an *image.Image object, even though it will be completely - // ignored in favor of the redundant json data. - // FIXME: the current prototype of Graph.Register is redundant. 
- img, err := image.NewImgJSON(imgJSON) + imageInspectRaw, err := image.RawJson() if err != nil { - return err - } - if err := s.graph.Register(img, layer); err != nil { - return err + return nil, err } - return nil -} -// CmdGet returns information about an image. -// If the image doesn't exist, an empty object is returned, to allow -// checking for an image's existence. -func (s *TagStore) CmdGet(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s NAME", job.Name) - } - name := job.Args[0] - res := &engine.Env{} - img, err := s.LookupImage(name) - // Note: if the image doesn't exist, LookupImage returns - // nil, nil. - if err != nil { - return err - } - if img != nil { - // We don't directly expose all fields of the Image objects, - // to maintain a clean public API which we can maintain over - // time even if the underlying structure changes. - // We should have done this with the Image object to begin with... - // but we didn't, so now we're doing it here. - // - // Fields that we're probably better off not including: - // - Config/ContainerConfig. Those structs have the same sprawl problem, - // so we shouldn't include them wholesale either. - // - Comment: initially created to fulfill the "every image is a git commit" - // metaphor, in practice people either ignore it or use it as a - // generic description field which it isn't. On deprecation shortlist. 
- res.SetAuto("Created", img.Created) - res.SetJson("Author", img.Author) - res.Set("Os", img.OS) - res.Set("Architecture", img.Architecture) - res.Set("DockerVersion", img.DockerVersion) - res.SetJson("Id", img.ID) - res.SetJson("Parent", img.Parent) - } - res.WriteTo(job.Stdout) - return nil + return imageInspectRaw, nil } -// CmdLookup return an image encoded in JSON -func (s *TagStore) CmdLookup(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s NAME", job.Name) +// Lookup return an image encoded in JSON +func (s *TagStore) Lookup(name string) (*types.ImageInspect, error) { + image, err := s.LookupImage(name) + if err != nil || image == nil { + return nil, fmt.Errorf("No such image: %s", name) } - name := job.Args[0] - if image, err := s.LookupImage(name); err == nil && image != nil { - if job.GetenvBool("raw") { - b, err := image.RawJson() - if err != nil { - return err - } - job.Stdout.Write(b) - return nil - } - out := &engine.Env{} - out.SetJson("Id", image.ID) - out.SetJson("Parent", image.Parent) - out.SetJson("Comment", image.Comment) - out.SetAuto("Created", image.Created) - out.SetJson("Container", image.Container) - out.SetJson("ContainerConfig", image.ContainerConfig) - out.Set("DockerVersion", image.DockerVersion) - out.SetJson("Author", image.Author) - out.SetJson("Config", image.Config) - out.Set("Architecture", image.Architecture) - out.Set("Os", image.OS) - out.SetInt64("Size", image.Size) - out.SetInt64("VirtualSize", image.GetParentsSize(0)+image.Size) - if _, err = out.WriteTo(job.Stdout); err != nil { - return err - } - return nil + imageInspect := &types.ImageInspect{ + Id: image.ID, + Parent: image.Parent, + Comment: image.Comment, + Created: image.Created, + Container: image.Container, + ContainerConfig: &image.ContainerConfig, + DockerVersion: image.DockerVersion, + Author: image.Author, + Config: image.Config, + Architecture: image.Architecture, + Os: image.OS, + Size: image.Size, + VirtualSize: 
image.GetParentsSize(0) + image.Size, } - return fmt.Errorf("No such image: %s", name) + + return imageInspect, nil } -// CmdTarLayer return the tarLayer of the image -func (s *TagStore) CmdTarLayer(job *engine.Job) error { - if len(job.Args) != 1 { - return fmt.Errorf("usage: %s NAME", job.Name) - } - name := job.Args[0] +// ImageTarLayer return the tarLayer of the image +func (s *TagStore) ImageTarLayer(name string, dest io.Writer) error { if image, err := s.LookupImage(name); err == nil && image != nil { fs, err := image.TarLayer() if err != nil { @@ -169,7 +57,7 @@ func (s *TagStore) CmdTarLayer(job *engine.Job) error { } defer fs.Close() - written, err := io.Copy(job.Stdout, fs) + written, err := io.Copy(dest, fs) if err != nil { return err } diff --git a/graph/tag.go b/graph/tag.go deleted file mode 100644 index c0b269946f297..0000000000000 --- a/graph/tag.go +++ /dev/null @@ -1,18 +0,0 @@ -package graph - -import ( - "fmt" - - "github.com/docker/docker/engine" -) - -func (s *TagStore) CmdTag(job *engine.Job) error { - if len(job.Args) != 2 && len(job.Args) != 3 { - return fmt.Errorf("Usage: %s IMAGE REPOSITORY [TAG]\n", job.Name) - } - var tag string - if len(job.Args) == 3 { - tag = job.Args[2] - } - return s.Set(job.Args[1], tag, job.Args[0], job.GetenvBool("force")) -} diff --git a/graph/tags.go b/graph/tags.go index 6346ea8b50dc6..abffe2f562c76 100644 --- a/graph/tags.go +++ b/graph/tags.go @@ -18,6 +18,7 @@ import ( "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/registry" + "github.com/docker/docker/trust" "github.com/docker/docker/utils" "github.com/docker/libtrust" ) @@ -42,6 +43,7 @@ type TagStore struct { pushingPool map[string]chan struct{} registryService *registry.Service eventsService *events.Events + trustService *trust.TrustStore } type Repository map[string]string @@ -64,7 +66,15 @@ func (r Repository) Contains(u Repository) bool { return true } -func NewTagStore(path string, graph 
*Graph, key libtrust.PrivateKey, registryService *registry.Service, eventsService *events.Events) (*TagStore, error) { +type TagStoreConfig struct { + Graph *Graph + Key libtrust.PrivateKey + Registry *registry.Service + Events *events.Events + Trust *trust.TrustStore +} + +func NewTagStore(path string, cfg *TagStoreConfig) (*TagStore, error) { abspath, err := filepath.Abs(path) if err != nil { return nil, err @@ -72,13 +82,14 @@ func NewTagStore(path string, graph *Graph, key libtrust.PrivateKey, registrySer store := &TagStore{ path: abspath, - graph: graph, - trustKey: key, + graph: cfg.Graph, + trustKey: cfg.Key, Repositories: make(map[string]Repository), pullingPool: make(map[string]chan struct{}), pushingPool: make(map[string]chan struct{}), - registryService: registryService, - eventsService: eventsService, + registryService: cfg.Registry, + eventsService: cfg.Events, + trustService: cfg.Trust, } // Load the json file if it exists, otherwise create it. if err := store.reload(); os.IsNotExist(err) { @@ -104,11 +115,12 @@ func (store *TagStore) save() error { } func (store *TagStore) reload() error { - jsonData, err := ioutil.ReadFile(store.path) + f, err := os.Open(store.path) if err != nil { return err } - if err := json.Unmarshal(jsonData, store); err != nil { + defer f.Close() + if err := json.NewDecoder(f).Decode(&store); err != nil { return err } return nil @@ -224,7 +236,7 @@ func (store *TagStore) Delete(repoName, ref string) (bool, error) { return deleted, store.save() } -func (store *TagStore) Set(repoName, tag, imageName string, force bool) error { +func (store *TagStore) Tag(repoName, tag, imageName string, force bool) error { return store.SetLoad(repoName, tag, imageName, force, nil) } diff --git a/graph/tags_unit_test.go b/graph/tags_unit_test.go index be5624245c596..0482fa58e3caa 100644 --- a/graph/tags_unit_test.go +++ b/graph/tags_unit_test.go @@ -60,7 +60,11 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { if err != nil { 
t.Fatal(err) } - store, err := NewTagStore(path.Join(root, "tags"), graph, nil, nil, events.New()) + tagCfg := &TagStoreConfig{ + Graph: graph, + Events: events.New(), + } + store, err := NewTagStore(path.Join(root, "tags"), tagCfg) if err != nil { t.Fatal(err) } @@ -72,7 +76,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { if err := graph.Register(img, officialArchive); err != nil { t.Fatal(err) } - if err := store.Set(testOfficialImageName, "", testOfficialImageID, false); err != nil { + if err := store.Tag(testOfficialImageName, "", testOfficialImageID, false); err != nil { t.Fatal(err) } privateArchive, err := fakeTar() @@ -83,7 +87,7 @@ func mkTestTagStore(root string, t *testing.T) *TagStore { if err := graph.Register(img, privateArchive); err != nil { t.Fatal(err) } - if err := store.Set(testPrivateImageName, "", testPrivateImageID, false); err != nil { + if err := store.Tag(testPrivateImageName, "", testPrivateImageID, false); err != nil { t.Fatal(err) } if err := store.SetDigest(testPrivateImageName, testPrivateImageDigest, testPrivateImageID); err != nil { diff --git a/graph/viz.go b/graph/viz.go deleted file mode 100644 index 0c45caa9efbba..0000000000000 --- a/graph/viz.go +++ /dev/null @@ -1,39 +0,0 @@ -package graph - -import ( - "fmt" - "strings" - - "github.com/docker/docker/engine" - "github.com/docker/docker/image" -) - -func (s *TagStore) CmdViz(job *engine.Job) error { - images, _ := s.graph.Map() - if images == nil { - return nil - } - job.Stdout.Write([]byte("digraph docker {\n")) - - var ( - parentImage *image.Image - err error - ) - for _, image := range images { - parentImage, err = image.GetParent() - if err != nil { - return fmt.Errorf("Error while getting parent image: %v", err) - } - if parentImage != nil { - job.Stdout.Write([]byte(" \"" + parentImage.ID + "\" -> \"" + image.ID + "\"\n")) - } else { - job.Stdout.Write([]byte(" base -> \"" + image.ID + "\" [style=invis]\n")) - } - } - - for id, repos := range 
s.GetRepoRefs() { - job.Stdout.Write([]byte(" \"" + id + "\" [label=\"" + id + "\\n" + strings.Join(repos, "\\n") + "\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n")) - } - job.Stdout.Write([]byte(" base [style=invisible]\n}\n")) - return nil -} diff --git a/hack/dind b/hack/dind index 1242cbffe1ed0..9289ba65561b0 100755 --- a/hack/dind +++ b/hack/dind @@ -3,7 +3,7 @@ set -e # DinD: a wrapper script which allows docker to be run inside a docker container. # Original version by Jerome Petazzoni -# See the blog post: http://blog.docker.com/2013/09/docker-can-now-run-within-docker/ +# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ # # This script should be executed inside a docker container in privilieged mode # ('docker run --privileged', introduced in docker 0.6). @@ -60,7 +60,7 @@ for HIER in $(cut -d: -f2 /proc/1/cgroup); do mkdir -p "$CGROUP/$HIER" - if ! mountpoint -q $CGROUP/$HIER; then + if ! mountpoint -q "$CGROUP/$HIER"; then mount -n -t cgroup -o "$OHIER" cgroup "$CGROUP/$HIER" fi diff --git a/hack/install.sh b/hack/install.sh index fcea11d01e71d..e15565fc79a3f 100755 --- a/hack/install.sh +++ b/hack/install.sh @@ -20,213 +20,230 @@ command_exists() { command -v "$@" > /dev/null 2>&1 } -case "$(uname -m)" in - *64) - ;; - *) - echo >&2 'Error: you are not using a 64bit platform.' - echo >&2 'Docker currently only supports 64bit platforms.' - exit 1 - ;; -esac - -if command_exists docker || command_exists lxc-docker; then - echo >&2 'Warning: "docker" or "lxc-docker" command appears to already exist.' - echo >&2 'Please ensure that you do not already have docker installed.' - echo >&2 'You may press Ctrl+C now to abort this process and rectify this situation.' 
- ( set -x; sleep 20 ) -fi - -user="$(id -un 2>/dev/null || true)" - -sh_c='sh -c' -if [ "$user" != 'root' ]; then - if command_exists sudo; then - sh_c='sudo -E sh -c' - elif command_exists su; then - sh_c='su -c' - else - echo >&2 'Error: this installer needs the ability to run commands as root.' - echo >&2 'We are unable to find either "sudo" or "su" available to make this happen.' - exit 1 +echo_docker_as_nonroot() { + your_user=your-user + [ "$user" != 'root' ] && your_user="$user" + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + cat <<-EOF + + If you would like to use Docker as a non-root user, you should now consider + adding your user to the "docker" group with something like: + + sudo usermod -aG docker $your_user + + Remember that you will have to log out and back in for this to take effect! + + EOF +} + +do_install() { + case "$(uname -m)" in + *64) + ;; + *) + cat >&2 <<-'EOF' + Error: you are not using a 64bit platform. + Docker currently only supports 64bit platforms. + EOF + exit 1 + ;; + esac + + if command_exists docker || command_exists lxc-docker; then + cat >&2 <<-'EOF' + Warning: "docker" or "lxc-docker" command appears to already exist. + Please ensure that you do not already have docker installed. + You may press Ctrl+C now to abort this process and rectify this situation. + EOF + ( set -x; sleep 20 ) fi -fi - -curl='' -if command_exists curl; then - curl='curl -sSL' -elif command_exists wget; then - curl='wget -qO-' -elif command_exists busybox && busybox --list-modules | grep -q wget; then - curl='busybox wget -qO-' -fi - -# perform some very rudimentary platform detection -lsb_dist='' -if command_exists lsb_release; then - lsb_dist="$(lsb_release -si)" -fi -if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then - lsb_dist="$(. 
/etc/lsb-release && echo "$DISTRIB_ID")" -fi -if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='debian' -fi -if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='fedora' -fi -if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" -fi - -lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" -case "$lsb_dist" in - amzn|fedora|centos) - if [ "$lsb_dist" = 'amzn' ]; then - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker' - ) + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' else - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-io' - ) - fi - if command_exists docker && [ -e /var/run/docker.sock ]; then - ( - set -x - $sh_c 'docker version' - ) || true + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 fi - your_user=your-user - [ "$user" != 'root' ] && your_user="$user" - echo - echo 'If you would like to use Docker as a non-root user, you should now consider' - echo 'adding your user to the "docker" group with something like:' - echo - echo ' sudo usermod -aG docker' $your_user - echo - echo 'Remember that you will have to log out and back in for this to take effect!' - echo - exit 0 - ;; - - ubuntu|debian|linuxmint) - export DEBIAN_FRONTEND=noninteractive - - did_apt_get_update= - apt_get_update() { - if [ -z "$did_apt_get_update" ]; then - ( set -x; $sh_c 'sleep 3; apt-get update' ) - did_apt_get_update=1 - fi - } + fi - # aufs is preferred over devicemapper; try to ensure the driver is available. - if ! grep -q aufs /proc/filesystems && ! 
$sh_c 'modprobe aufs'; then - if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -q '^ii' 2>/dev/null; then - kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + curl='' + if command_exists curl; then + curl='curl -sSL' + elif command_exists wget; then + curl='wget -qO-' + elif command_exists busybox && busybox --list-modules | grep -q wget; then + curl='busybox wget -qO-' + fi - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + # perform some very rudimentary platform detection + lsb_dist='' + if command_exists lsb_release; then + lsb_dist="$(lsb_release -si)" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then + lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" + fi + if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then + lsb_dist='debian' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then + lsb_dist='fedora' + fi + if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' - echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' 
+ lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + case "$lsb_dist" in + amzn|fedora|centos) + if [ "$lsb_dist" = 'amzn' ]; then + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker' + ) + else + ( + set -x + $sh_c 'sleep 3; yum -y -q install docker-io' + ) + fi + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + echo_docker_as_nonroot + exit 0 + ;; + + ubuntu|debian|linuxmint|'elementary os'|kali) + export DEBIAN_FRONTEND=noninteractive + + did_apt_get_update= + apt_get_update() { + if [ -z "$did_apt_get_update" ]; then + ( set -x; $sh_c 'sleep 3; apt-get update' ) + did_apt_get_update=1 + fi + } + + # aufs is preferred over devicemapper; try to ensure the driver is available. + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -q '^ii' 2>/dev/null; then + kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" + + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true + + if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then + echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' + echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' + ( set -x; sleep 10 ) + fi + else + echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' + echo >&2 ' package. We have no AUFS support. Consider installing the packages' + echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' ( set -x; sleep 10 ) fi - else - echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' - echo >&2 ' package. We have no AUFS support. Consider installing the packages' - echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' 
- ( set -x; sleep 10 ) fi - fi - # install apparmor utils if they're missing and apparmor is enabled in the kernel - # otherwise Docker will fail to start - if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then - if command -v apparmor_parser &> /dev/null; then - echo 'apparmor is enabled in the kernel and apparmor utils were already installed' - else - echo 'apparmor is enabled in the kernel, but apparmor_parser missing' + # install apparmor utils if they're missing and apparmor is enabled in the kernel + # otherwise Docker will fail to start + if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then + if command -v apparmor_parser &> /dev/null; then + echo 'apparmor is enabled in the kernel and apparmor utils were already installed' + else + echo 'apparmor is enabled in the kernel, but apparmor_parser missing' + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + fi + fi + + if [ ! -e /usr/lib/apt/methods/https ]; then apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) + ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) fi - fi + if [ -z "$curl" ]; then + apt_get_update + ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) + curl='curl -sSL' + fi + ( + set -x + if [ "https://get.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9" + elif [ "https://test.docker.com/" = "$url" ]; then + $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6" + else + $sh_c "$curl ${url}gpg | apt-key add -" + fi + $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list" + $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker' + ) + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' 
+ ) || true
+ fi
+ echo_docker_as_nonroot
+ exit 0
+ ;;
- if [ ! -e /usr/lib/apt/methods/https ]; then
- apt_get_update
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
- fi
- if [ -z "$curl" ]; then
- apt_get_update
- ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
- curl='curl -sSL'
- fi
- (
- set -x
- if [ "https://get.docker.com/" = "$url" ]; then
- $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
- elif [ "https://test.docker.com/" = "$url" ]; then
- $sh_c "apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 740B314AE3941731B942C66ADF4FD13717AAD7D6"
- else
- $sh_c "$curl ${url}gpg | apt-key add -"
+ gentoo)
+ if [ "$url" = "https://test.docker.com/" ]; then
+ # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
+ cat >&2 <<-'EOF'
+
+ You appear to be trying to install the latest nightly build in Gentoo.
+ The portage tree should contain the latest stable release of Docker, but
+ if you want something more recent, you can always use the live ebuild
+ provided in the "docker" overlay available via layman. 
For more
+ instructions, please see the following URL:
+
+ https://github.com/tianon/docker-overlay#using-this-overlay
+
+ After adding the "docker" overlay, you should be able to:
+
+ emerge -av =app-emulation/docker-9999
+
+ EOF
+ exit 1
fi
- $sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
- $sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker'
- )
- if command_exists docker && [ -e /var/run/docker.sock ]; then
+ (
set -x
- $sh_c 'docker version'
- ) || true
- fi
- your_user=your-user
- [ "$user" != 'root' ] && your_user="$user"
- echo
- echo 'If you would like to use Docker as a non-root user, you should now consider'
- echo 'adding your user to the "docker" group with something like:'
- echo
- echo ' sudo usermod -aG docker' $your_user
- echo
- echo 'Remember that you will have to log out and back in for this to take effect!'
- echo
- exit 0
- ;;
-
- gentoo)
- if [ "$url" = "https://test.docker.com/" ]; then
- echo >&2
- echo >&2 ' You appear to be trying to install the latest nightly build in Gentoo.'
- echo >&2 ' The portage tree should contain the latest stable release of Docker, but'
- echo >&2 ' if you want something more recent, you can always use the live ebuild'
- echo >&2 ' provided in the "docker" overlay available via layman. 
For more' - echo >&2 ' instructions, please see the following URL:' - echo >&2 ' https://github.com/tianon/docker-overlay#using-this-overlay' - echo >&2 ' After adding the "docker" overlay, you should be able to:' - echo >&2 ' emerge -av =app-emulation/docker-9999' - echo >&2 - exit 1 - fi + $sh_c 'sleep 3; emerge app-emulation/docker' + ) + exit 0 + ;; + esac - ( - set -x - $sh_c 'sleep 3; emerge app-emulation/docker' - ) - exit 0 - ;; -esac + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output + cat >&2 <<-'EOF' -cat >&2 <<'EOF' + Either your platform is not easily detectable, is not supported by this + installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have + a package for Docker. Please visit the following URL for more detailed + installation instructions: - Either your platform is not easily detectable, is not supported by this - installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have - a package for Docker. Please visit the following URL for more detailed - installation instructions: + https://docs.docker.com/en/latest/installation/ - https://docs.docker.com/en/latest/installation/ + EOF + exit 1 +} -EOF -exit 1 +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/hack/make.sh b/hack/make.sh index 4117469d6640f..31e08cd37618a 100755 --- a/hack/make.sh +++ b/hack/make.sh @@ -6,7 +6,7 @@ set -e # # Requirements: # - The current directory should be a checkout of the docker source code -# (http://github.com/docker/docker). Whatever version is checked out +# (https://github.com/docker/docker). Whatever version is checked out # will be built. # - The VERSION file, at the root of the repository, should exist, and # will be used as Docker binary version and package version. 
@@ -24,10 +24,12 @@ set -e set -o pipefail export DOCKER_PKG='github.com/docker/docker' +export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export MAKEDIR="$SCRIPTDIR/make" # We're a nice, sexy, little shell script, and people might try to run us; # but really, they shouldn't. We want to be in a container! -if [ "$(pwd)" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then +if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then { echo "# WARNING! I don't seem to be running in the Docker container." echo "# The result of this command might be an incorrect build, and will not be" @@ -44,7 +46,9 @@ echo DEFAULT_BUNDLES=( validate-dco validate-gofmt + validate-test validate-toml + validate-vet binary @@ -61,7 +65,7 @@ DEFAULT_BUNDLES=( ubuntu ) -VERSION=$(cat ./VERSION) +VERSION=$(< ./VERSION) if command -v git &> /dev/null && git rev-parse &> /dev/null; then GITCOMMIT=$(git rev-parse --short HEAD) if [ -n "$(git status --porcelain --untracked-files=no)" ]; then @@ -81,11 +85,11 @@ if [ "$AUTO_GOPATH" ]; then rm -rf .gopath mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" - export GOPATH="$(pwd)/.gopath:$(pwd)/vendor" + export GOPATH="${PWD}/.gopath:${PWD}/vendor" fi if [ ! 
"$GOPATH" ]; then - echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH' + echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' echo >&2 ' alternatively, set AUTO_GOPATH=1' exit 1 fi @@ -109,7 +113,7 @@ fi # Use these flags when compiling the tests and final binary IAMSTATIC='true' -source "$(dirname "$BASH_SOURCE")/make/.go-autogen" +source "$SCRIPTDIR/make/.go-autogen" LDFLAGS='-w' LDFLAGS_STATIC='-linkmode external' @@ -250,7 +254,7 @@ bundle() { bundlescript=$1 bundle=$(basename $bundlescript) echo "---> Making bundle: $bundle (in bundles/$VERSION/$bundle)" - mkdir -p bundles/$VERSION/$bundle + mkdir -p "bundles/$VERSION/$bundle" source "$bundlescript" "$(pwd)/bundles/$VERSION/$bundle" } @@ -260,17 +264,22 @@ main() { mkdir -p bundles if [ -e "bundles/$VERSION" ]; then echo "bundles/$VERSION already exists. Removing." - rm -fr bundles/$VERSION && mkdir bundles/$VERSION || exit 1 + rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 echo fi - SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + + if [ "$(go env GOHOSTOS)" != 'windows' ]; then + # Windows and symlinks don't get along well + ln -sfT "$VERSION" bundles/latest + fi + if [ $# -lt 1 ]; then bundles=(${DEFAULT_BUNDLES[@]}) else bundles=($@) fi for bundle in ${bundles[@]}; do - bundle $SCRIPTDIR/make/$bundle + bundle "$SCRIPTDIR/make/$bundle" echo done } diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat new file mode 100644 index 0000000000000..ec635144f6004 --- /dev/null +++ b/hack/make/.build-deb/compat @@ -0,0 +1 @@ +9 diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control new file mode 100644 index 0000000000000..03caae8342a9e --- /dev/null +++ b/hack/make/.build-deb/control @@ -0,0 +1,27 @@ +Source: docker-core +Maintainer: Docker +Homepage: https://dockerproject.com +Vcs-Browser: https://github.com/docker/docker +Vcs-Git: git://github.com/docker/docker.git + +Package: 
docker-core
+Architecture: linux-any
+Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
+Recommends: aufs-tools,
+ ca-certificates,
+ cgroupfs-mount | cgroup-lite,
+ git,
+ xz-utils,
+ ${apparmor:Recommends}
+Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package
+Description: Docker: the open-source application container engine
+ Docker is an open source project to pack, ship and run any application as a
+ lightweight container
+ .
+ Docker containers are both hardware-agnostic and platform-agnostic. This means
+ they can run anywhere, from your laptop to the largest EC2 compute instance and
+ everything in between - and they don't require you to use a particular
+ language, framework or packaging system. That makes them great building blocks
+ for deploying and scaling web apps, databases, and backend services without
+ depending on a particular stack or provider. 
diff --git a/hack/make/.build-deb/docker-core.bash-completion b/hack/make/.build-deb/docker-core.bash-completion new file mode 100644 index 0000000000000..6ea111930886d --- /dev/null +++ b/hack/make/.build-deb/docker-core.bash-completion @@ -0,0 +1 @@ +contrib/completion/bash/docker diff --git a/hack/make/.build-deb/docker-core.docker.default b/hack/make/.build-deb/docker-core.docker.default new file mode 120000 index 0000000000000..4278533d65967 --- /dev/null +++ b/hack/make/.build-deb/docker-core.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/hack/make/.build-deb/docker-core.docker.init b/hack/make/.build-deb/docker-core.docker.init new file mode 120000 index 0000000000000..8cb89d30dde93 --- /dev/null +++ b/hack/make/.build-deb/docker-core.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/hack/make/.build-deb/docker-core.docker.upstart b/hack/make/.build-deb/docker-core.docker.upstart new file mode 120000 index 0000000000000..7e1b64a3e640a --- /dev/null +++ b/hack/make/.build-deb/docker-core.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/hack/make/.build-deb/docker-core.install b/hack/make/.build-deb/docker-core.install new file mode 100644 index 0000000000000..c3f4eb146574d --- /dev/null +++ b/hack/make/.build-deb/docker-core.install @@ -0,0 +1,10 @@ +#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ +#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ +#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ +contrib/*-integration usr/share/docker-core/contrib/ +contrib/check-config.sh usr/share/docker-core/contrib/ +contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ +contrib/init/systemd/docker.service lib/systemd/system/ +contrib/init/systemd/docker.socket lib/systemd/system/ +contrib/mk* usr/share/docker-core/contrib/ 
+contrib/nuke-graph-directory.sh usr/share/docker-core/contrib/ diff --git a/hack/make/.build-deb/docker-core.manpages b/hack/make/.build-deb/docker-core.manpages new file mode 100644 index 0000000000000..d5cff8a479fa3 --- /dev/null +++ b/hack/make/.build-deb/docker-core.manpages @@ -0,0 +1 @@ +docs/man/man*/* diff --git a/hack/make/.build-deb/docker-core.postinst b/hack/make/.build-deb/docker-core.postinst new file mode 100644 index 0000000000000..eeef6ca801605 --- /dev/null +++ b/hack/make/.build-deb/docker-core.postinst @@ -0,0 +1,20 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + if [ -z "$2" ]; then + if ! getent group docker > /dev/null; then + groupadd --system docker + fi + fi + ;; + abort-*) + # How'd we get here?? + exit 1 + ;; + *) + ;; +esac + +#DEBHELPER# diff --git a/hack/make/.build-deb/docker-core.udev b/hack/make/.build-deb/docker-core.udev new file mode 120000 index 0000000000000..914a361959de3 --- /dev/null +++ b/hack/make/.build-deb/docker-core.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs new file mode 100644 index 0000000000000..b43bf86b50fd8 --- /dev/null +++ b/hack/make/.build-deb/docs @@ -0,0 +1 @@ +README.md diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules new file mode 100755 index 0000000000000..3369f4fc54286 --- /dev/null +++ b/hack/make/.build-deb/rules @@ -0,0 +1,36 @@ +#!/usr/bin/make -f + +VERSION = $(shell cat VERSION) + +override_dh_gencontrol: + # if we're on Ubuntu, we need to Recommends: apparmor + echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-core.substvars + dh_gencontrol + +override_dh_auto_build: + ./hack/make.sh dynbinary + # ./docs/man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here + +override_dh_auto_test: + ./bundles/$(VERSION)/dynbinary/docker -v + +override_dh_strip: + # the SHA1 of dockerinit 
is important: don't strip it + # also, Go has lots of problems with stripping, so just don't + +override_dh_auto_install: + mkdir -p debian/docker-core/usr/bin + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/docker)" debian/docker-core/usr/bin/docker + mkdir -p debian/docker-core/usr/libexec/docker + cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary/dockerinit)" debian/docker-core/usr/libexec/docker/dockerinit + +override_dh_installinit: + # use "docker" as our service name, not "docker-core" + dh_installinit --name=docker + +override_dh_installudev: + # match our existing priority + dh_installudev --priority=z80 + +%: + dh $@ --with=systemd,bash-completion diff --git a/hack/make/.dockerinit b/hack/make/.dockerinit index fceba7db920bb..4a62ee1addc36 100644 --- a/hack/make/.dockerinit +++ b/hack/make/.dockerinit @@ -2,7 +2,7 @@ set -e IAMSTATIC="true" -source "$(dirname "$BASH_SOURCE")/.go-autogen" +source "${MAKEDIR}/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build \ @@ -30,4 +30,4 @@ else fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair compiled for one another -export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) diff --git a/hack/make/.dockerinit-gccgo b/hack/make/.dockerinit-gccgo index 592a4152c857e..9890863841f23 100644 --- a/hack/make/.dockerinit-gccgo +++ b/hack/make/.dockerinit-gccgo @@ -2,7 +2,7 @@ set -e IAMSTATIC="true" -source "$(dirname "$BASH_SOURCE")/.go-autogen" +source "${MAKEDIR}/.go-autogen" # dockerinit still needs to be a static binary, even if docker is dynamic go build --compiler=gccgo \ @@ -12,6 +12,7 @@ go build --compiler=gccgo \ -g -Wl,--no-export-dynamic $EXTLDFLAGS_STATIC_DOCKER + -lnetgo " \ ./dockerinit @@ -27,4 +28,4 @@ else fi # sha1 our new dockerinit to ensure separate docker and dockerinit always run in a perfect pair 
compiled for one another -export DOCKER_INITSHA1="$($sha1sum $DEST/dockerinit-$VERSION | cut -d' ' -f1)" +export DOCKER_INITSHA1=$($sha1sum "$DEST/dockerinit-$VERSION" | cut -d' ' -f1) diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start index 570c6c7a9af9f..57fd525028e74 100644 --- a/hack/make/.integration-daemon-start +++ b/hack/make/.integration-daemon-start @@ -25,6 +25,7 @@ if [ -z "$DOCKER_TEST_HOST" ]; then --pidfile "$DEST/docker.pid" \ &> "$DEST/docker.log" ) & + trap "source '${MAKEDIR}/.integration-daemon-stop'" EXIT # make sure that if the script exits unexpectedly, we stop this daemon we just started else export DOCKER_HOST="$DOCKER_TEST_HOST" fi diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop index 319aaa4a1db54..6e1dc844def89 100644 --- a/hack/make/.integration-daemon-stop +++ b/hack/make/.integration-daemon-stop @@ -1,9 +1,11 @@ #!/bin/bash +trap - EXIT # reset EXIT trap applied in .integration-daemon-start + for pidFile in $(find "$DEST" -name docker.pid); do pid=$(set -x; cat "$pidFile") - ( set -x; kill $pid ) - if ! wait $pid; then + ( set -x; kill "$pid" ) + if ! 
wait "$pid"; then echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" fi done diff --git a/hack/make/binary b/hack/make/binary index 0f57ea0d693e8..d3ec2939c00ca 100644 --- a/hack/make/binary +++ b/hack/make/binary @@ -11,7 +11,7 @@ if [[ "$(uname -s)" == CYGWIN* ]]; then DEST=$(cygpath -mw $DEST) fi -source "$(dirname "$BASH_SOURCE")/.go-autogen" +source "${MAKEDIR}/.go-autogen" go build \ -o "$DEST/$BINARY_FULLNAME" \ diff --git a/hack/make/build-deb b/hack/make/build-deb new file mode 100644 index 0000000000000..a5a6d43870b38 --- /dev/null +++ b/hack/make/build-deb @@ -0,0 +1,70 @@ +#!/bin/bash +set -e + +DEST=$1 + +# subshell so that we can export PATH without breaking other things +( + source "${MAKEDIR}/.integration-daemon-start" + + # TODO consider using frozen images for the dockercore/builder-deb tags + + debVersion="${VERSION//-/'~'}" + # if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better + if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then + gitUnix="$(git log -1 --pretty='%at')" + gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')" + gitCommit="$(git log -1 --pretty='%h')" + gitVersion="git${gitDate}.0.${gitCommit}" + # gitVersion is now something like 'git20150128.112847.0.17e840a' + debVersion="$debVersion~$gitVersion" + + # $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false + # true + # $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false + # true + # $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false + # true + + # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a + fi + + debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" + debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" + 
debDate="$(date --rfc-2822)" + + # if go-md2man is available, pre-generate the man pages + ./docs/man/md2man-all.sh -q || true + # TODO decide if it's worth getting go-md2man in _each_ builder environment to avoid this + + # TODO add a configurable knob for _which_ debs to build so we don't have to modify the file or build all of them every time we need to test + for dir in contrib/builder/deb/*/; do + version="$(basename "$dir")" + suite="${version##*-}" + + image="dockercore/builder-deb:$version" + if ! docker inspect "$image" &> /dev/null; then + ( set -x && docker build -t "$image" "$dir" ) + fi + + mkdir -p "$DEST/$version" + cat > "$DEST/$version/Dockerfile.build" <<-EOF + FROM $image + WORKDIR /usr/src/docker + COPY . /usr/src/docker + RUN ln -sfv hack/make/.build-deb debian + RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog + RUN dpkg-buildpackage -uc -us + EOF + cp -a "$DEST/$version/Dockerfile.build" . # can't use $DEST because it's in .dockerignore... + tempImage="docker-temp/build-deb:$version" + ( set -x && docker build -t "$tempImage" -f Dockerfile.build . ) + docker run --rm "$tempImage" bash -c 'cd .. 
&& tar -c *_*' | tar -xvC "$DEST/$version" + docker rmi "$tempImage" + done + + # clean up after ourselves + rm -f Dockerfile.build + + source "${MAKEDIR}/.integration-daemon-stop" +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/cross b/hack/make/cross index 3c5cb0401dc7f..368ebc5ab970a 100644 --- a/hack/make/cross +++ b/hack/make/cross @@ -28,6 +28,6 @@ for platform in $DOCKER_CROSSPLATFORMS; do export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported fi - source "$(dirname "$BASH_SOURCE")/binary" "$DEST/$platform" + source "${MAKEDIR}/binary" "$DEST/$platform" ) done diff --git a/hack/make/dynbinary b/hack/make/dynbinary index f9b43b0e77773..e1b65b48efc41 100644 --- a/hack/make/dynbinary +++ b/hack/make/dynbinary @@ -4,7 +4,7 @@ set -e DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then - source "$(dirname "$BASH_SOURCE")/.dockerinit" + source "${MAKEDIR}/.dockerinit" hash_files "$DEST/dockerinit-$VERSION" else @@ -18,5 +18,5 @@ fi export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here - source "$(dirname "$BASH_SOURCE")/binary" + source "${MAKEDIR}/binary" ) diff --git a/hack/make/dyngccgo b/hack/make/dyngccgo index 738e1450ac511..7bdd404f10573 100644 --- a/hack/make/dyngccgo +++ b/hack/make/dyngccgo @@ -4,7 +4,7 @@ set -e DEST=$1 if [ -z "$DOCKER_CLIENTONLY" ]; then - source "$(dirname "$BASH_SOURCE")/.dockerinit-gccgo" + source "${MAKEDIR}/.dockerinit-gccgo" hash_files "$DEST/dockerinit-$VERSION" else @@ -19,5 +19,5 @@ fi export LDFLAGS_STATIC_DOCKER='' export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not 
building a "static" binary here - source "$(dirname "$BASH_SOURCE")/gccgo" + source "${MAKEDIR}/gccgo" ) diff --git a/hack/make/gccgo b/hack/make/gccgo index c85d2fbda55d2..896c2d46c1bc6 100644 --- a/hack/make/gccgo +++ b/hack/make/gccgo @@ -6,8 +6,11 @@ BINARY_NAME="docker-$VERSION" BINARY_EXTENSION="$(binary_extension)" BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" -source "$(dirname "$BASH_SOURCE")/.go-autogen" +source "${MAKEDIR}/.go-autogen" +if [[ "${BUILDFLAGS[@]}" =~ 'netgo ' ]]; then + EXTLDFLAGS_STATIC_DOCKER+=' -lnetgo' +fi go build -compiler=gccgo \ -o "$DEST/$BINARY_FULLNAME" \ "${BUILDFLAGS[@]}" \ diff --git a/hack/make/test-docker-py b/hack/make/test-docker-py index b95cf40af511d..ac5ef3583344c 100644 --- a/hack/make/test-docker-py +++ b/hack/make/test-docker-py @@ -5,26 +5,16 @@ DEST=$1 # subshell so that we can export PATH without breaking other things ( - source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "${MAKEDIR}/.integration-daemon-start" - # we need to wrap up everything in between integration-daemon-start and - # integration-daemon-stop to make sure we kill the daemon and don't hang, - # even and especially on test failures - didFail= - if ! 
{ - dockerPy='/docker-py' - [ -d "$dockerPy" ] || { - dockerPy="$DEST/docker-py" - git clone https://github.com/docker/docker-py.git "$dockerPy" - } + dockerPy='/docker-py' + [ -d "$dockerPy" ] || { + dockerPy="$DEST/docker-py" + git clone https://github.com/docker/docker-py.git "$dockerPy" + } - # exporting PYTHONPATH to import "docker" from our local docker-py - test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py" - }; then - didFail=1 - fi + # exporting PYTHONPATH to import "docker" from our local docker-py + test_env PYTHONPATH="$dockerPy" python "$dockerPy/tests/integration_test.py" - source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" - - [ -z "$didFail" ] # "set -e" ftw -) 2>&1 | tee -a $DEST/test.log + source "${MAKEDIR}/.integration-daemon-stop" +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-integration b/hack/make/test-integration index 5cb7102bc8e1d..206e37abf0c4e 100644 --- a/hack/make/test-integration +++ b/hack/make/test-integration @@ -5,7 +5,7 @@ DEST=$1 INIT=$DEST/../dynbinary/dockerinit-$VERSION [ -x "$INIT" ] || { - source "$(dirname "$BASH_SOURCE")/.dockerinit" + source "${MAKEDIR}/.dockerinit" INIT="$DEST/dockerinit" } export TEST_DOCKERINIT_PATH="$INIT" @@ -22,4 +22,4 @@ bundle_test_integration() { # spews when it is given packages that aren't used bundle_test_integration 2>&1 \ | grep --line-buffered -v '^warning: no packages being tested depend on ' \ - | tee -a $DEST/test.log + | tee -a "$DEST/test.log" diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli index 3ef41d919e55a..db1cb298fd592 100644 --- a/hack/make/test-integration-cli +++ b/hack/make/test-integration-cli @@ -9,23 +9,13 @@ bundle_test_integration_cli() { # subshell so that we can export PATH without breaking other things ( - source "$(dirname "$BASH_SOURCE")/.integration-daemon-start" + source "${MAKEDIR}/.integration-daemon-start" - # we need to wrap up everything in between integration-daemon-start and - 
# integration-daemon-stop to make sure we kill the daemon and don't hang, - # even and especially on test failures - didFail= - if ! { - source "$(dirname "$BASH_SOURCE")/.ensure-frozen-images" - source "$(dirname "$BASH_SOURCE")/.ensure-httpserver" - source "$(dirname "$BASH_SOURCE")/.ensure-emptyfs" + source "${MAKEDIR}/.ensure-frozen-images" + source "${MAKEDIR}/.ensure-httpserver" + source "${MAKEDIR}/.ensure-emptyfs" - bundle_test_integration_cli - }; then - didFail=1 - fi + bundle_test_integration_cli - source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop" - - [ -z "$didFail" ] # "set -e" ftw -) 2>&1 | tee -a $DEST/test.log + source "${MAKEDIR}/.integration-daemon-stop" +) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-unit b/hack/make/test-unit index 7a26428201276..7b6ce089e2cdb 100644 --- a/hack/make/test-unit +++ b/hack/make/test-unit @@ -39,12 +39,12 @@ bundle_test_unit() { mkdir -p "$HOME/.parallel" touch "$HOME/.parallel/ignored_vars" - echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" + echo "$TESTDIRS" | parallel --jobs "$PARALLEL_JOBS" --env _ "${MAKEDIR}/.go-compile-test-dir" rm -rf "$HOME" else # aww, no "parallel" available - fall back to boring for test_dir in $TESTDIRS; do - "$(dirname "$BASH_SOURCE")/.go-compile-test-dir" "$test_dir" || true + "${MAKEDIR}/.go-compile-test-dir" "$test_dir" || true # don't let one directory that fails to build tank _all_ our tests! 
done fi @@ -85,4 +85,4 @@ go_run_test_dir() { fi } -bundle_test_unit 2>&1 | tee -a $DEST/test.log +bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/ubuntu b/hack/make/ubuntu index e34369eb16383..7543789a187d9 100644 --- a/hack/make/ubuntu +++ b/hack/make/ubuntu @@ -23,7 +23,7 @@ fi # ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" -PACKAGE_URL="http://www.docker.com/" +PACKAGE_URL="https://www.docker.com/" PACKAGE_MAINTAINER="support@docker.com" PACKAGE_DESCRIPTION="Linux container runtime Docker complements LXC with a high-level API which operates at the process @@ -40,26 +40,26 @@ bundle_ubuntu() { DIR=$DEST/build # Include our udev rules - mkdir -p $DIR/etc/udev/rules.d - cp contrib/udev/80-docker.rules $DIR/etc/udev/rules.d/ + mkdir -p "$DIR/etc/udev/rules.d" + cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" # Include our init scripts - mkdir -p $DIR/etc/init - cp contrib/init/upstart/docker.conf $DIR/etc/init/ - mkdir -p $DIR/etc/init.d - cp contrib/init/sysvinit-debian/docker $DIR/etc/init.d/ - mkdir -p $DIR/etc/default - cp contrib/init/sysvinit-debian/docker.default $DIR/etc/default/docker - mkdir -p $DIR/lib/systemd/system - cp contrib/init/systemd/docker.{service,socket} $DIR/lib/systemd/system/ + mkdir -p "$DIR/etc/init" + cp contrib/init/upstart/docker.conf "$DIR/etc/init/" + mkdir -p "$DIR/etc/init.d" + cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" + mkdir -p "$DIR/etc/default" + cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" + mkdir -p "$DIR/lib/systemd/system" + cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" # Include contributed completions - mkdir -p $DIR/etc/bash_completion.d - cp contrib/completion/bash/docker $DIR/etc/bash_completion.d/ - mkdir -p $DIR/usr/share/zsh/vendor-completions - cp contrib/completion/zsh/_docker 
$DIR/usr/share/zsh/vendor-completions/ - mkdir -p $DIR/etc/fish/completions - cp contrib/completion/fish/docker.fish $DIR/etc/fish/completions/ + mkdir -p "$DIR/etc/bash_completion.d" + cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" + mkdir -p "$DIR/usr/share/zsh/vendor-completions" + cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" + mkdir -p "$DIR/etc/fish/completions" + cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" # Include contributed man pages docs/man/md2man-all.sh -q @@ -76,11 +76,11 @@ bundle_ubuntu() { # Copy the binary # This will fail if the binary bundle hasn't been built - mkdir -p $DIR/usr/bin - cp $DEST/../binary/docker-$VERSION $DIR/usr/bin/docker + mkdir -p "$DIR/usr/bin" + cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" # Generate postinst/prerm/postrm scripts - cat > $DEST/postinst <<'EOF' + cat > "$DEST/postinst" <<'EOF' #!/bin/sh set -e set -u @@ -104,7 +104,7 @@ service docker $_dh_action 2>/dev/null || true #DEBHELPER# EOF - cat > $DEST/prerm <<'EOF' + cat > "$DEST/prerm" <<'EOF' #!/bin/sh set -e set -u @@ -113,7 +113,7 @@ service docker stop 2>/dev/null || true #DEBHELPER# EOF - cat > $DEST/postrm <<'EOF' + cat > "$DEST/postrm" <<'EOF' #!/bin/sh set -e set -u @@ -131,18 +131,18 @@ fi #DEBHELPER# EOF # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way - chmod +x $DEST/postinst $DEST/prerm $DEST/postrm + chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" ( # switch directories so we create *.deb in the right folder - cd $DEST + cd "$DEST" # create lxc-docker-VERSION package - fpm -s dir -C $DIR \ - --name lxc-docker-$VERSION --version "$PKGVERSION" \ - --after-install $DEST/postinst \ - --before-remove $DEST/prerm \ - --after-remove $DEST/postrm \ + fpm -s dir -C "$DIR" \ + --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ + 
--after-install "$DEST/postinst" \ + --before-remove "$DEST/prerm" \ + --after-remove "$DEST/postrm" \ --architecture "$PACKAGE_ARCHITECTURE" \ --prefix / \ --depends iptables \ @@ -184,8 +184,8 @@ EOF ) # clean up after ourselves so we have a clean output directory - rm $DEST/postinst $DEST/prerm $DEST/postrm - rm -r $DIR + rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" + rm -r "$DIR" } bundle_ubuntu diff --git a/hack/make/validate-dco b/hack/make/validate-dco index 84c47f526d1d5..5ac98728f347d 100644 --- a/hack/make/validate-dco +++ b/hack/make/validate-dco @@ -1,6 +1,6 @@ #!/bin/bash -source "$(dirname "$BASH_SOURCE")/.validate" +source "${MAKEDIR}/.validate" adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') diff --git a/hack/make/validate-gofmt b/hack/make/validate-gofmt index 8fc88cc559dec..7ad9e85576458 100644 --- a/hack/make/validate-gofmt +++ b/hack/make/validate-gofmt @@ -1,6 +1,6 @@ #!/bin/bash -source "$(dirname "$BASH_SOURCE")/.validate" +source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) diff --git a/hack/make/validate-test b/hack/make/validate-test new file mode 100644 index 0000000000000..d9d05f3bea8f9 --- /dev/null +++ b/hack/make/validate-test @@ -0,0 +1,35 @@ +#!/bin/bash + +# Make sure we're not using gos' Testing package any more in integration-cli + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) +unset IFS + +badFiles=() +for f in "${files[@]}"; do + # skip check_test.go since it *does* use the testing package + if [ "$f" = "integration-cli/check_test.go" ]; then + continue + fi + + # we use "git show" here to validate that what's committed is formatted + if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then + badFiles+=( "$f" ) + fi +done + +if [ ${#badFiles[@]} -eq 0 ]; then + 
echo 'Congratulations! No testing.T found.' +else + { + echo "These files use the wrong testing infrastructure:" + for f in "${badFiles[@]}"; do + echo " - $f" + done + echo + } >&2 + false +fi diff --git a/hack/make/validate-toml b/hack/make/validate-toml index 16c228d14eb97..18f26ee757997 100644 --- a/hack/make/validate-toml +++ b/hack/make/validate-toml @@ -1,6 +1,6 @@ #!/bin/bash -source "$(dirname "$BASH_SOURCE")/.validate" +source "${MAKEDIR}/.validate" IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) diff --git a/hack/make/validate-vet b/hack/make/validate-vet new file mode 100644 index 0000000000000..febe93e5c1e2e --- /dev/null +++ b/hack/make/validate-vet @@ -0,0 +1,32 @@ +#!/bin/bash + +source "${MAKEDIR}/.validate" + +IFS=$'\n' +files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) +unset IFS + +errors=() +for f in "${files[@]}"; do + # we use "git show" here to validate that what's committed passes go vet + failedVet=$(go vet "$f") + if [ "$failedVet" ]; then + errors+=( "$failedVet" ) + fi +done + + +if [ ${#errors[@]} -eq 0 ]; then + echo 'Congratulations! All Go source files have been vetted.' +else + { + echo "Errors from go vet:" + for err in "${errors[@]}"; do + echo " - $err" + done + echo + echo 'Please fix the above errors. You can test via "go vet" and commit the result.' + echo + } >&2 + false +fi diff --git a/hack/release.sh b/hack/release.sh index da95808c5a198..04772546fd658 100755 --- a/hack/release.sh +++ b/hack/release.sh @@ -60,7 +60,7 @@ if [ "$1" != '--release-regardless-of-test-failure' ]; then ) fi -VERSION=$(cat VERSION) +VERSION=$(< VERSION) BUCKET=$AWS_S3_BUCKET # These are the 2 keys we've used to sign the deb's @@ -71,23 +71,23 @@ BUCKET=$AWS_S3_BUCKET setup_s3() { # Try creating the bucket. Ignore errors (it might already exist). 
- s3cmd mb s3://$BUCKET 2>/dev/null || true + s3cmd mb "s3://$BUCKET" 2>/dev/null || true # Check access to the bucket. # s3cmd has no useful exit status, so we cannot check that. # Instead, we check if it outputs anything on standard output. # (When there are problems, it uses standard error instead.) - s3cmd info s3://$BUCKET | grep -q . + s3cmd info "s3://$BUCKET" | grep -q . # Make the bucket accessible through website endpoints. - s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET + s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET" } # write_to_s3 uploads the contents of standard input to the specified S3 url. write_to_s3() { DEST=$1 F=`mktemp` - cat > $F - s3cmd --acl-public --mime-type='text/plain' put $F $DEST - rm -f $F + cat > "$F" + s3cmd --acl-public --mime-type='text/plain' put "$F" "$DEST" + rm -f "$F" } s3_url() { @@ -246,20 +246,20 @@ release_build() { # 1. A full APT repository is published at $BUCKET/ubuntu/ # 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index release_ubuntu() { - [ -e bundles/$VERSION/ubuntu ] || { + [ -e "bundles/$VERSION/ubuntu" ] || { echo >&2 './hack/make.sh must be run before release_ubuntu' exit 1 } # Sign our packages dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \ - --sign builder bundles/$VERSION/ubuntu/*.deb + --sign builder "bundles/$VERSION/ubuntu/"*.deb # Setup the APT repo APTDIR=bundles/$VERSION/ubuntu/apt - mkdir -p $APTDIR/conf $APTDIR/db - s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true - cat > $APTDIR/conf/distributions < "$APTDIR/conf/distributions" < bundles/$VERSION/ubuntu/gpg - s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg + s3cmd sync "$HOME/.gnupg/" "s3://$BUCKET/ubuntu/.gnupg/" + gpg --armor --export releasedocker > "bundles/$VERSION/ubuntu/gpg" + s3cmd --acl-public put "bundles/$VERSION/ubuntu/gpg" "s3://$BUCKET/gpg" local gpgFingerprint=36A1D7869245C8950F966E92D8576A8BA88D21E9 if [[ $BUCKET == test* ]]; 
then @@ -287,7 +287,7 @@ EOF fi # Upload repo - s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/ + s3cmd --acl-public sync "$APTDIR/" "s3://$BUCKET/ubuntu/" cat <&2 './hack/make.sh must be run before release_binaries' exit 1 } @@ -341,29 +341,29 @@ EOF # Add redirect at /builds/info for URL-backwards-compatibility rm -rf /tmp/emptyfile && touch /tmp/emptyfile - s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/builds/info + s3cmd --acl-public --add-header='x-amz-website-redirect-location:/builds/' --mime-type='text/plain' put /tmp/emptyfile "s3://$BUCKET/builds/info" if [ -z "$NOLATEST" ]; then echo "Advertising $VERSION on $BUCKET as most recent version" - echo $VERSION | write_to_s3 s3://$BUCKET/latest + echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest" fi } # Upload the index script release_index() { - sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 s3://$BUCKET/index + sed "s,url='https://get.docker.com/',url='$(s3_url)/'," hack/install.sh | write_to_s3 "s3://$BUCKET/index" } release_test() { if [ -e "bundles/$VERSION/test" ]; then - s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/ + s3cmd --acl-public sync "bundles/$VERSION/test/" "s3://$BUCKET/test/" fi } setup_gpg() { # Make sure that we have our keys - mkdir -p $HOME/.gnupg/ - s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ $HOME/.gnupg/ || true + mkdir -p "$HOME/.gnupg/" + s3cmd sync "s3://$BUCKET/ubuntu/.gnupg/" "$HOME/.gnupg/" || true gpg --list-keys releasedocker >/dev/null || { gpg --gen-key --batch <
- Each layer has an associated A JSON structure which describes some + Each layer has an associated JSON structure which describes some basic information about the image such as date created, author, and the ID of its parent image as well as execution/runtime configuration like its entry point, default arguments, CPU/memory shares, networking, and @@ -81,7 +81,7 @@ This specification uses the following terms: times of any entries differ. For this reason, image checksums are generated using the TarSum algorithm which produces a cryptographic hash of file contents and selected headers only. Details of this - algorithm are described in the separate [TarSum specification](https://github.com/docker/docker/blob/master/pkg/tarsum/tarsum_spec.md). + algorithm are described in the separate TarSum specification.
Tag @@ -492,9 +492,9 @@ Changeset tar archives. There is also a format for a single archive which contains complete information about an image, including: - - repository names/tags - - all image layer JSON files - - all tar archives of each layer filesystem changesets + - repository names/tags + - all image layer JSON files + - all tar archives of each layer filesystem changesets For example, here's what the full archive of `library/busybox` is (displayed in `tree` format): @@ -523,10 +523,10 @@ For example, here's what the full archive of `library/busybox` is (displayed in There are one or more directories named with the ID for each layer in a full image. Each of these directories contains 3 files: - * `VERSION` - The schema version of the `json` file - * `json` - The JSON metadata for an image layer - * `layer.tar` - The Tar archive of the filesystem changeset for an image - layer. + * `VERSION` - The schema version of the `json` file + * `json` - The JSON metadata for an image layer + * `layer.tar` - The Tar archive of the filesystem changeset for an image + layer. 
The content of the `VERSION` files is simply the semantic version of the JSON metadata schema: diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go new file mode 100644 index 0000000000000..202799cbf153b --- /dev/null +++ b/integration-cli/check_test.go @@ -0,0 +1,81 @@ +package main + +import ( + "fmt" + "testing" + "time" + + "github.com/go-check/check" +) + +func Test(t *testing.T) { + check.TestingT(t) +} + +type TimerSuite struct { + start time.Time +} + +func (s *TimerSuite) SetUpTest(c *check.C) { + s.start = time.Now() +} + +func (s *TimerSuite) TearDownTest(c *check.C) { + fmt.Printf("%-60s%.2f\n", c.TestName(), time.Since(s.start).Seconds()) +} + +func init() { + check.Suite(&DockerSuite{}) +} + +type DockerSuite struct { + TimerSuite +} + +func (s *DockerSuite) TearDownTest(c *check.C) { + deleteAllContainers() + deleteAllImages() + s.TimerSuite.TearDownTest(c) +} + +func init() { + check.Suite(&DockerRegistrySuite{ + ds: &DockerSuite{}, + }) +} + +type DockerRegistrySuite struct { + ds *DockerSuite + reg *testRegistryV2 +} + +func (s *DockerRegistrySuite) SetUpTest(c *check.C) { + s.reg = setupRegistry(c) + s.ds.SetUpTest(c) +} + +func (s *DockerRegistrySuite) TearDownTest(c *check.C) { + s.reg.Close() + s.ds.TearDownTest(c) +} + +func init() { + check.Suite(&DockerDaemonSuite{ + ds: &DockerSuite{}, + }) +} + +type DockerDaemonSuite struct { + ds *DockerSuite + d *Daemon +} + +func (s *DockerDaemonSuite) SetUpTest(c *check.C) { + s.d = NewDaemon(c) + s.ds.SetUpTest(c) +} + +func (s *DockerDaemonSuite) TearDownTest(c *check.C) { + s.d.Stop() + s.ds.TearDownTest(c) +} diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go index 3257798c563f5..c784d5c369cef 100644 --- a/integration-cli/docker_api_attach_test.go +++ b/integration-cli/docker_api_attach_test.go @@ -4,23 +4,23 @@ import ( "bytes" "os/exec" "strings" - "testing" "time" + "github.com/go-check/check" + 
"code.google.com/p/go.net/websocket" ) -func TestGetContainersAttachWebsocket(t *testing.T) { +func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-dit", "busybox", "cat") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } - defer deleteAllContainers() rwc, err := sockConn(time.Duration(10 * time.Second)) if err != nil { - t.Fatal(err) + c.Fatal(err) } cleanedContainerID := strings.TrimSpace(out) @@ -29,39 +29,51 @@ func TestGetContainersAttachWebsocket(t *testing.T) { "http://localhost", ) if err != nil { - t.Fatal(err) + c.Fatal(err) } ws, err := websocket.NewClient(config, rwc) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ws.Close() expected := []byte("hello") actual := make([]byte, len(expected)) - outChan := make(chan string) + + outChan := make(chan error) go func() { - if _, err := ws.Read(actual); err != nil { - t.Fatal(err) - } - outChan <- "done" + _, err := ws.Read(actual) + outChan <- err + close(outChan) }() - inChan := make(chan string) + inChan := make(chan error) go func() { - if _, err := ws.Write(expected); err != nil { - t.Fatal(err) - } - inChan <- "done" + _, err := ws.Write(expected) + inChan <- err + close(inChan) }() - <-inChan - <-outChan + select { + case err := <-inChan: + if err != nil { + c.Fatal(err) + } + case <-time.After(5 * time.Second): + c.Fatal("Timeout writing to ws") + } + + select { + case err := <-outChan: + if err != nil { + c.Fatal(err) + } + case <-time.After(5 * time.Second): + c.Fatal("Timeout reading from ws") + } if !bytes.Equal(expected, actual) { - t.Fatal("Expected output on websocket to match input") + c.Fatal("Expected output on websocket to match input") } - - logDone("container attach websocket - can echo input via cat") } diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go index 02d069f5987d1..1fec3912e6de1 100644 --- 
a/integration-cli/docker_api_containers_test.go +++ b/integration-cli/docker_api_containers_test.go @@ -4,67 +4,61 @@ import ( "bytes" "encoding/json" "io" + "net/http" "os/exec" "strings" - "testing" "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/go-check/check" ) -func TestContainerApiGetAll(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestContainerApiGetAll(c *check.C) { startCount, err := getContainerCount() if err != nil { - t.Fatalf("Cannot query container count: %v", err) + c.Fatalf("Cannot query container count: %v", err) } name := "getall" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error on container creation: %v, output: %q", err, out) + c.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/json?all=1", nil) - if err != nil { - t.Fatalf("GET all containers sockRequest failed: %v", err) - } + status, body, err := sockRequest("GET", "/containers/json?all=1", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) var inspectJSON []struct { Names []string } if err = json.Unmarshal(body, &inspectJSON); err != nil { - t.Fatalf("unable to unmarshal response body: %v", err) + c.Fatalf("unable to unmarshal response body: %v", err) } if len(inspectJSON) != startCount+1 { - t.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) + c.Fatalf("Expected %d container(s), %d found (started with: %d)", startCount+1, len(inspectJSON), startCount) } if actual := inspectJSON[0].Names[0]; actual != "/"+name { - t.Fatalf("Container Name mismatch. Expected: %q, received: %q\n", "/"+name, actual) + c.Fatalf("Container Name mismatch. 
Expected: %q, received: %q\n", "/"+name, actual) } - - logDone("container REST API - check GET json/all=1") } -func TestContainerApiGetExport(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestContainerApiGetExport(c *check.C) { name := "exportcontainer" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error on container creation: %v, output: %q", err, out) + c.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/"+name+"/export", nil) - if err != nil { - t.Fatalf("GET containers/export sockRequest failed: %v", err) - } + status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) found := false for tarReader := tar.NewReader(bytes.NewReader(body)); ; { @@ -73,7 +67,7 @@ func TestContainerApiGetExport(t *testing.T) { if err == io.EOF { break } - t.Fatal(err) + c.Fatal(err) } if h.Name == "test" { found = true @@ -82,33 +76,28 @@ func TestContainerApiGetExport(t *testing.T) { } if !found { - t.Fatalf("The created test file has not been found in the exported image") + c.Fatalf("The created test file has not been found in the exported image") } - - logDone("container REST API - check GET containers/export") } -func TestContainerApiGetChanges(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestContainerApiGetChanges(c *check.C) { name := "changescontainer" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "rm", "/etc/passwd") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error on container creation: %v, output: %q", err, out) + c.Fatalf("Error on container creation: %v, output: %q", err, out) } - body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) - if err != nil { - t.Fatalf("GET containers/changes 
sockRequest failed: %v", err) - } + status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) changes := []struct { Kind int Path string }{} if err = json.Unmarshal(body, &changes); err != nil { - t.Fatalf("unable to unmarshal response body: %v", err) + c.Fatalf("unable to unmarshal response body: %v", err) } // Check the changelog for removal of /etc/passwd @@ -119,56 +108,50 @@ func TestContainerApiGetChanges(t *testing.T) { } } if !success { - t.Fatalf("/etc/passwd has been removed but is not present in the diff") + c.Fatalf("/etc/passwd has been removed but is not present in the diff") } - - logDone("container REST API - check GET containers/changes") } -func TestContainerApiStartVolumeBinds(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestContainerApiStartVolumeBinds(c *check.C) { name := "testing" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } - if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { - t.Fatal(err) - } + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) bindPath := randomUnixTmpDirPath("test") config = map[string]interface{}{ "Binds": []string{bindPath + ":/tmp"}, } - if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { - t.Fatal(err) - } + status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) pth, err := inspectFieldMap(name, "Volumes", "/tmp") if err != nil { - t.Fatal(err) + c.Fatal(err) } if pth != bindPath { - t.Fatalf("expected volume host path to be %s, got %s", bindPath, pth) + 
c.Fatalf("expected volume host path to be %s, got %s", bindPath, pth) } - - logDone("container REST API - check volume binds on start") } // Test for GH#10618 -func TestContainerApiStartDupVolumeBinds(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestContainerApiStartDupVolumeBinds(c *check.C) { name := "testdups" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{"/tmp": {}}, } - if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { - t.Fatal(err) - } + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) bindPath1 := randomUnixTmpDirPath("test1") bindPath2 := randomUnixTmpDirPath("test2") @@ -176,67 +159,62 @@ func TestContainerApiStartDupVolumeBinds(t *testing.T) { config = map[string]interface{}{ "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, } - if body, err := sockRequest("POST", "/containers/"+name+"/start", config); err == nil { - t.Fatal("expected container start to fail when duplicate volume binds to same container path") - } else { - if !strings.Contains(string(body), "Duplicate volume") { - t.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err) - } - } + status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) - logDone("container REST API - check for duplicate volume binds error on start") + if !strings.Contains(string(body), "Duplicate volume") { + c.Fatalf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err) + } } -func TestContainerApiStartVolumesFrom(t *testing.T) { - defer deleteAllContainers() + +func (s *DockerSuite) 
TestContainerApiStartVolumesFrom(c *check.C) { volName := "voltst" volPath := "/tmp" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - name := "testing" + name := "TestContainerApiStartDupVolumeBinds" config := map[string]interface{}{ "Image": "busybox", "Volumes": map[string]struct{}{volPath: {}}, } - if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { - t.Fatal(err) - } + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) config = map[string]interface{}{ "VolumesFrom": []string{volName}, } - if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { - t.Fatal(err) - } + status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) pth, err := inspectFieldMap(name, "Volumes", volPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } pth2, err := inspectFieldMap(volName, "Volumes", volPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } if pth != pth2 { - t.Fatalf("expected volume host path to be %s, got %s", pth, pth2) + c.Fatalf("expected volume host path to be %s, got %s", pth, pth2) } - - logDone("container REST API - check VolumesFrom on start") } // Ensure that volumes-from has priority over binds/anything else // This is pretty much the same as TestRunApplyVolumesFromBeforeVolumes, except with passing the VolumesFrom and the bind on start -func TestVolumesFromHasPriority(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestVolumesFromHasPriority(c *check.C) { volName := "voltst2" volPath := "/tmp" if out, _, err := 
runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", volName, "-v", volPath, "busybox")); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } name := "testing" @@ -245,108 +223,101 @@ func TestVolumesFromHasPriority(t *testing.T) { "Volumes": map[string]struct{}{volPath: {}}, } - if _, err := sockRequest("POST", "/containers/create?name="+name, config); err != nil && !strings.Contains(err.Error(), "201 Created") { - t.Fatal(err) - } + status, _, err := sockRequest("POST", "/containers/create?name="+name, config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) bindPath := randomUnixTmpDirPath("test") config = map[string]interface{}{ "VolumesFrom": []string{volName}, "Binds": []string{bindPath + ":/tmp"}, } - if _, err := sockRequest("POST", "/containers/"+name+"/start", config); err != nil && !strings.Contains(err.Error(), "204 No Content") { - t.Fatal(err) - } + status, _, err = sockRequest("POST", "/containers/"+name+"/start", config) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) pth, err := inspectFieldMap(name, "Volumes", volPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } pth2, err := inspectFieldMap(volName, "Volumes", volPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } if pth != pth2 { - t.Fatalf("expected volume host path to be %s, got %s", pth, pth2) + c.Fatalf("expected volume host path to be %s, got %s", pth, pth2) } - - logDone("container REST API - check VolumesFrom has priority") } -func TestGetContainerStats(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestGetContainerStats(c *check.C) { var ( name = "statscontainer" runCmd = exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "top") ) out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error on container creation: %v, output: %q", err, out) + c.Fatalf("Error on container creation: %v, output: %q", err, out) } type b struct { - body []byte - 
err error + status int + body []byte + err error } bc := make(chan b, 1) go func() { - body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{body, err} + status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + bc <- b{status, body, err} }() // allow some time to stream the stats from the container time.Sleep(4 * time.Second) if _, err := runCommand(exec.Command(dockerBinary, "rm", "-f", name)); err != nil { - t.Fatal(err) + c.Fatal(err) } // collect the results from the stats stream or timeout and fail // if the stream was not disconnected. select { case <-time.After(2 * time.Second): - t.Fatal("stream was not closed after container was removed") + c.Fatal("stream was not closed after container was removed") case sr := <-bc: - if sr.err != nil { - t.Fatal(sr.err) - } + c.Assert(sr.err, check.IsNil) + c.Assert(sr.status, check.Equals, http.StatusOK) dec := json.NewDecoder(bytes.NewBuffer(sr.body)) var s *types.Stats // decode only one object from the stream if err := dec.Decode(&s); err != nil { - t.Fatal(err) + c.Fatal(err) } } - logDone("container REST API - check GET containers/stats") } -func TestGetStoppedContainerStats(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { + // TODO: this test does nothing because we are c.Assert'ing in goroutine var ( name = "statscontainer" runCmd = exec.Command(dockerBinary, "create", "--name", name, "busybox", "top") ) out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error on container creation: %v, output: %q", err, out) + c.Fatalf("Error on container creation: %v, output: %q", err, out) } go func() { // We'll never get return for GET stats from sockRequest as of now, // just send request and see if panic or error would happen on daemon side. 
- _, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - if err != nil { - t.Fatal(err) - } + status, _, err := sockRequest("GET", "/containers/"+name+"/stats", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) }() // allow some time to send request and let daemon deal with it time.Sleep(1 * time.Second) - - logDone("container REST API - check GET stopped containers/stats") } -func TestBuildApiDockerfilePath(t *testing.T) { +func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) { // Test to make sure we stop people from trying to leave the // build context when specifying the path to the dockerfile buffer := new(bytes.Buffer) @@ -358,28 +329,30 @@ func TestBuildApiDockerfilePath(t *testing.T) { Name: "Dockerfile", Size: int64(len(dockerfile)), }); err != nil { - t.Fatalf("failed to write tar file header: %v", err) + c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write(dockerfile); err != nil { - t.Fatalf("failed to write tar file content: %v", err) + c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { - t.Fatalf("failed to close tar archive: %v", err) + c.Fatalf("failed to close tar archive: %v", err) } - out, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar") - if err == nil { - t.Fatalf("Build was supposed to fail: %s", out) + res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar") + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + out, err := readBody(body) + if err != nil { + c.Fatal(err) } if !strings.Contains(string(out), "must be within the build context") { - t.Fatalf("Didn't complain about leaving build context: %s", out) + c.Fatalf("Didn't complain about leaving build context: %s", out) } - - logDone("container REST API - check build w/bad Dockerfile path") } -func TestBuildApiDockerFileRemote(t 
*testing.T) { +func (s *DockerSuite) TestBuildApiDockerFileRemote(c *check.C) { server, err := fakeStorage(map[string]string{ "testD": `FROM busybox COPY * /tmp/ @@ -387,13 +360,17 @@ RUN find / -name ba* RUN find /tmp/`, }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() - buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + buf, err := readBody(body) if err != nil { - t.Fatalf("Build failed: %s", err) + c.Fatal(err) } // Make sure Dockerfile exists. @@ -401,36 +378,36 @@ RUN find /tmp/`, out := string(buf) if !strings.Contains(out, "/tmp/Dockerfile") || strings.Contains(out, "baz") { - t.Fatalf("Incorrect output: %s", out) + c.Fatalf("Incorrect output: %s", out) } - - logDone("container REST API - check build with -f from remote") } -func TestBuildApiLowerDockerfile(t *testing.T) { +func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) { git, err := fakeGIT("repo", map[string]string{ "dockerfile": `FROM busybox RUN echo from dockerfile`, }, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer git.Close() - buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + buf, err := readBody(body) if err != nil { - t.Fatalf("Build failed: %s\n%q", err, buf) + c.Fatal(err) } out := string(buf) if !strings.Contains(out, "from dockerfile") { - t.Fatalf("Incorrect output: %s", out) + c.Fatalf("Incorrect output: %s", out) } - - logDone("container REST API - check build with lower dockerfile") } -func TestBuildApiBuildGitWithF(t *testing.T) { +func (s 
*DockerSuite) TestBuildApiBuildGitWithF(c *check.C) { git, err := fakeGIT("repo", map[string]string{ "baz": `FROM busybox RUN echo from baz`, @@ -438,26 +415,28 @@ RUN echo from baz`, RUN echo from Dockerfile`, }, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer git.Close() // Make sure it tries to 'dockerfile' query param value - buf, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json") + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + buf, err := readBody(body) if err != nil { - t.Fatalf("Build failed: %s\n%q", err, buf) + c.Fatal(err) } out := string(buf) if !strings.Contains(out, "from baz") { - t.Fatalf("Incorrect output: %s", out) + c.Fatalf("Incorrect output: %s", out) } - - logDone("container REST API - check build from git w/F") } -func TestBuildApiDoubleDockerfile(t *testing.T) { - testRequires(t, UnixCli) // dockerfile overwrites Dockerfile on Windows +func (s *DockerSuite) TestBuildApiDoubleDockerfile(c *check.C) { + testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows git, err := fakeGIT("repo", map[string]string{ "Dockerfile": `FROM busybox RUN echo from Dockerfile`, @@ -465,25 +444,27 @@ RUN echo from Dockerfile`, RUN echo from dockerfile`, }, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer git.Close() // Make sure it tries to 'dockerfile' query param value - buf, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json") + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + buf, err := readBody(body) if err != nil { - t.Fatalf("Build failed: %s", err) + c.Fatal(err) } out := string(buf) if !strings.Contains(out, "from Dockerfile") { - t.Fatalf("Incorrect 
output: %s", out) + c.Fatalf("Incorrect output: %s", out) } - - logDone("container REST API - check build with two dockerfiles") } -func TestBuildApiDockerfileSymlink(t *testing.T) { +func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) { // Test to make sure we stop people from trying to leave the // build context when specifying a symlink as the path to the dockerfile buffer := new(bytes.Buffer) @@ -495,15 +476,19 @@ func TestBuildApiDockerfileSymlink(t *testing.T) { Typeflag: tar.TypeSymlink, Linkname: "/etc/passwd", }); err != nil { - t.Fatalf("failed to write tar file header: %v", err) + c.Fatalf("failed to write tar file header: %v", err) } if err := tw.Close(); err != nil { - t.Fatalf("failed to close tar archive: %v", err) + c.Fatalf("failed to close tar archive: %v", err) } - out, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") - if err == nil { - t.Fatalf("Build was supposed to fail: %s", out) + res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + + out, err := readBody(body) + if err != nil { + c.Fatal(err) } // The reason the error is "Cannot locate specified Dockerfile" is because @@ -511,45 +496,365 @@ func TestBuildApiDockerfileSymlink(t *testing.T) { // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is // a nonexistent file. 
if !strings.Contains(string(out), "Cannot locate specified Dockerfile: Dockerfile") { - t.Fatalf("Didn't complain about leaving build context: %s", out) + c.Fatalf("Didn't complain about leaving build context: %s", out) } - - logDone("container REST API - check build w/bad Dockerfile symlink path") } // #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume -func TestPostContainerBindNormalVolume(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestPostContainerBindNormalVolume(c *check.C) { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=one", "busybox")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } fooDir, err := inspectFieldMap("one", "Volumes", "/foo") if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "create", "-v", "/foo", "--name=two", "busybox")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} - _, err = sockRequest("POST", "/containers/two/start", bindSpec) - if err != nil && !strings.Contains(err.Error(), "204 No Content") { - t.Fatal(err) - } + status, _, err := sockRequest("POST", "/containers/two/start", bindSpec) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) fooDir2, err := inspectFieldMap("two", "Volumes", "/foo") if err != nil { - t.Fatal(err) + c.Fatal(err) } if fooDir2 != fooDir { - t.Fatalf("expected volume path to be %s, got: %s", fooDir, fooDir2) + c.Fatalf("expected volume path to be %s, got: %s", fooDir, fooDir2) + } +} + +func (s *DockerSuite) TestContainerApiPause(c *check.C) { + defer unpauseAllContainers() + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "30") + out, _, err := runCommandWithOutput(runCmd) + + if err != nil { + c.Fatalf("failed to create a container: %s, 
%v", out, err) + } + ContainerID := strings.TrimSpace(out) + + status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) + + pausedContainers, err := getSliceOfPausedContainers() + + if err != nil { + c.Fatalf("error thrown while checking if containers were paused: %v", err) + } + + if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + } + + status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) + c.Assert(status, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) + + pausedContainers, err = getSliceOfPausedContainers() + + if err != nil { + c.Fatalf("error thrown while checking if containers were paused: %v", err) + } + + if pausedContainers != nil { + c.Fatalf("There should be no paused container.") + } +} + +func (s *DockerSuite) TestContainerApiTop(c *check.C) { + out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "top").CombinedOutput() + if err != nil { + c.Fatal(err, out) + } + id := strings.TrimSpace(string(out)) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + type topResp struct { + Titles []string + Processes [][]string + } + var top topResp + status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + if err := json.Unmarshal(b, &top); err != nil { + c.Fatal(err) } - logDone("container REST API - can use path from normal volume as bind-mount to overwrite another volume") + if len(top.Titles) != 11 { + c.Fatalf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles) + } + + if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { + c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) + } + if 
len(top.Processes) != 2 { + c.Fatalf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes) + } + if top.Processes[0][10] != "/bin/sh -c top" { + c.Fatalf("expected `/bin/sh -c top`, found: %s", top.Processes[0][10]) + } + if top.Processes[1][10] != "top" { + c.Fatalf("expected `top`, found: %s", top.Processes[1][10]) + } +} + +func (s *DockerSuite) TestContainerApiCommit(c *check.C) { + cName := "testapicommit" + out, err := exec.Command(dockerBinary, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test").CombinedOutput() + if err != nil { + c.Fatal(err, out) + } + + name := "TestContainerApiCommit" + status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + type resp struct { + Id string + } + var img resp + if err := json.Unmarshal(b, &img); err != nil { + c.Fatal(err) + } + + cmd, err := inspectField(img.Id, "Config.Cmd") + if err != nil { + c.Fatal(err) + } + if cmd != "{[/bin/sh -c touch /test]}" { + c.Fatalf("got wrong Cmd from commit: %q", cmd) + } + // sanity check, make sure the image is what we think it is + out, err = exec.Command(dockerBinary, "run", img.Id, "ls", "/test").CombinedOutput() + if err != nil { + c.Fatalf("error checking committed image: %v - %q", err, string(out)) + } +} + +func (s *DockerSuite) TestContainerApiCreate(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, + } + + status, b, err := sockRequest("POST", "/containers/create", config) + c.Assert(status, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + type createResp struct { + Id string + } + var container createResp + if err := json.Unmarshal(b, &container); err != nil { + c.Fatal(err) + } + + out, err := exec.Command(dockerBinary, "start", "-a", container.Id).CombinedOutput() + if err != nil { + c.Fatal(out, err) + } + if 
strings.TrimSpace(string(out)) != "/test" { + c.Fatalf("expected output `/test`, got %q", out) + } +} + +func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) { + var hostName = "test-host" + config := map[string]interface{}{ + "Image": "busybox", + "Hostname": hostName, + } + + _, b, err := sockRequest("POST", "/containers/create", config) + if err != nil && !strings.Contains(err.Error(), "200 OK: 201") { + c.Fatal(err) + } + type createResp struct { + Id string + } + var container createResp + if err := json.Unmarshal(b, &container); err != nil { + c.Fatal(err) + } + + var id = container.Id + + _, bodyGet, err := sockRequest("GET", "/containers/"+id+"/json", nil) + + type configLocal struct { + Hostname string + } + type getResponse struct { + Id string + Config configLocal + } + + var containerInfo getResponse + if err := json.Unmarshal(bodyGet, &containerInfo); err != nil { + c.Fatal(err) + } + var hostNameActual = containerInfo.Config.Hostname + if hostNameActual != "test-host" { + c.Fatalf("Mismatched Hostname, Expected %v, Actual: %v ", hostName, hostNameActual) + } +} + +func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) { + config := map[string]interface{}{ + "Image": "busybox", + } + + create := func(ct string) (*http.Response, io.ReadCloser, error) { + jsonData := bytes.NewBuffer(nil) + if err := json.NewEncoder(jsonData).Encode(config); err != nil { + c.Fatal(err) + } + return sockRequestRaw("POST", "/containers/create", jsonData, ct) + } + + // Try with no content-type + res, body, err := create("") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + body.Close() + + // Try with wrong content-type + res, body, err = create("application/xml") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + body.Close() + + // now application/json + res, body, err = create("application/json") + c.Assert(err, check.IsNil) + 
c.Assert(res.StatusCode, check.Equals, http.StatusCreated) + body.Close() +} + +// Issue 7941 - test to make sure a "null" in JSON is just ignored. +// W/o this fix a null in JSON would be parsed into a string var as "null" +func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) { + config := `{ + "Hostname":"", + "Domainname":"", + "Memory":0, + "MemorySwap":0, + "CpuShares":0, + "Cpuset":null, + "AttachStdin":true, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "ExposedPorts":{}, + "Tty":true, + "OpenStdin":true, + "StdinOnce":true, + "Env":[], + "Cmd":"ls", + "Image":"busybox", + "Volumes":{}, + "WorkingDir":"", + "Entrypoint":null, + "NetworkDisabled":false, + "OnBuild":null}` + + res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + c.Assert(res.StatusCode, check.Equals, http.StatusCreated) + c.Assert(err, check.IsNil) + + b, err := readBody(body) + if err != nil { + c.Fatal(err) + } + type createResp struct { + Id string + } + var container createResp + if err := json.Unmarshal(b, &container); err != nil { + c.Fatal(err) + } + + out, err := inspectField(container.Id, "HostConfig.CpusetCpus") + if err != nil { + c.Fatal(err, out) + } + if out != "" { + c.Fatalf("expected empty string, got %q", out) + } +} + +func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { + config := `{ + "Image": "busybox", + "Cmd": "ls", + "OpenStdin": true, + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, _ := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") + b, err2 := readBody(body) + if err2 != nil { + c.Fatal(err2) + } + + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true) +} + +func (s *DockerSuite) TestStartWithTooLowMemoryLimit(c *check.C) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, 
"create", "busybox")) + if err != nil { + c.Fatal(err, out) + } + + containerID := strings.TrimSpace(out) + + config := `{ + "CpuShares": 100, + "Memory": 524287 + }` + + res, body, _ := sockRequestRaw("POST", "/containers/"+containerID+"/start", strings.NewReader(config), "application/json") + b, err2 := readBody(body) + if err2 != nil { + c.Fatal(err2) + } + + c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError) + c.Assert(strings.Contains(string(b), "Minimum memory limit allowed is 4MB"), check.Equals, true) +} + +func (s *DockerSuite) TestContainerApiRename(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh") + out, _, err := runCommandWithOutput(runCmd) + c.Assert(err, check.IsNil) + + containerID := strings.TrimSpace(out) + newName := "TestContainerApiRenameNew" + statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) + + // 204 No Content is expected, not 200 + c.Assert(statusCode, check.Equals, http.StatusNoContent) + c.Assert(err, check.IsNil) + + name, err := inspectField(containerID, "Name") + if name != "/"+newName { + c.Fatalf("Failed to rename container, expected %v, got %v. 
Container rename API failed", newName, name) + } } diff --git a/integration-cli/docker_api_exec_resize_test.go b/integration-cli/docker_api_exec_resize_test.go new file mode 100644 index 0000000000000..ab753d8ecf163 --- /dev/null +++ b/integration-cli/docker_api_exec_resize_test.go @@ -0,0 +1,23 @@ +package main + +import ( + "net/http" + "os/exec" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestExecResizeApiHeightWidthNoInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf(out, err) + } + cleanedContainerID := strings.TrimSpace(out) + + endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) +} diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go index 1ed99a2561e86..b7957480f50a0 100644 --- a/integration-cli/docker_api_exec_test.go +++ b/integration-cli/docker_api_exec_test.go @@ -5,23 +5,25 @@ package main import ( "bytes" "fmt" + "net/http" "os/exec" - "testing" + + "github.com/go-check/check" ) // Regression test for #9414 -func TestExecApiCreateNoCmd(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecApiCreateNoCmd(c *check.C) { name := "exec_test" runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) - if err == nil || !bytes.Contains(body, []byte("No exec command specified")) { - t.Fatalf("Expected error when creating exec command with no Cmd specified: %q", err) - } + status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", 
name), map[string]interface{}{"Cmd": nil}) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) - logDone("exec create API - returns error when missing Cmd") + if !bytes.Contains(body, []byte("No exec command specified")) { + c.Fatalf("Expected message when creating exec command with no Cmd specified") + } } diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go index 38d891fd5ad83..e88fbaeaad9c3 100644 --- a/integration-cli/docker_api_images_test.go +++ b/integration-cli/docker_api_images_test.go @@ -2,25 +2,99 @@ package main import ( "encoding/json" - "testing" + "net/http" + "net/url" + "os/exec" + "strings" "github.com/docker/docker/api/types" + "github.com/go-check/check" ) -func TestLegacyImages(t *testing.T) { - body, err := sockRequest("GET", "/v1.6/images/json", nil) - if err != nil { - t.Fatalf("Error on GET: %s", err) - } +func (s *DockerSuite) TestLegacyImages(c *check.C) { + status, body, err := sockRequest("GET", "/v1.6/images/json", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) images := []types.LegacyImage{} if err = json.Unmarshal(body, &images); err != nil { - t.Fatalf("Error on unmarshal: %s", err) + c.Fatalf("Error on unmarshal: %s", err) } if len(images) == 0 || images[0].Tag == "" || images[0].Repository == "" { - t.Fatalf("Bad data: %q", images) + c.Fatalf("Bad data: %q", images) + } +} + +func (s *DockerSuite) TestApiImagesFilter(c *check.C) { + name := "utest:tag1" + name2 := "utest/docker:tag2" + name3 := "utest:5000/docker:tag3" + for _, n := range []string{name, name2, name3} { + if out, err := exec.Command(dockerBinary, "tag", "busybox", n).CombinedOutput(); err != nil { + c.Fatal(err, out) + } + } + type image struct{ RepoTags []string } + getImages := func(filter string) []image { + v := url.Values{} + v.Set("filter", filter) + status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) + c.Assert(status, 
check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var images []image + if err := json.Unmarshal(b, &images); err != nil { + c.Fatal(err) + } + + return images + } + + errMsg := "incorrect number of matches returned" + if images := getImages("utest*/*"); len(images[0].RepoTags) != 2 { + c.Fatal(errMsg) + } + if images := getImages("utest"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) + } + if images := getImages("utest*"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) + } + if images := getImages("*5000*/*"); len(images[0].RepoTags) != 1 { + c.Fatal(errMsg) } +} + +func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) { + testRequires(c, Network) + out, err := buildImage("saveandload", "FROM hello-world\nENV FOO bar", false) + if err != nil { + c.Fatal(err) + } + id := strings.TrimSpace(out) - logDone("images - checking legacy json") + res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + defer body.Close() + + if out, err := exec.Command(dockerBinary, "rmi", id).CombinedOutput(); err != nil { + c.Fatal(err, out) + } + + res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") + c.Assert(err, check.IsNil) + c.Assert(res.StatusCode, check.Equals, http.StatusOK) + + defer loadBody.Close() + + inspectOut, err := exec.Command(dockerBinary, "inspect", "--format='{{ .Id }}'", id).CombinedOutput() + if err != nil { + c.Fatal(err, inspectOut) + } + if strings.TrimSpace(string(inspectOut)) != id { + c.Fatal("load did not work properly") + } } diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go new file mode 100644 index 0000000000000..4084289102932 --- /dev/null +++ b/integration-cli/docker_api_info_test.go @@ -0,0 +1,36 @@ +package main + +import ( + "net/http" + "strings" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestInfoApi(c *check.C) { + endpoint := 
"/info" + + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + // always shown fields + stringsToCheck := []string{ + "ID", + "Containers", + "Images", + "ExecutionDriver", + "LoggingDriver", + "OperatingSystem", + "NCPU", + "MemTotal", + "KernelVersion", + "Driver"} + + out := string(body) + for _, linePrefix := range stringsToCheck { + if !strings.Contains(out, linePrefix) { + c.Errorf("couldn't find string %v in output", linePrefix) + } + } +} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go index e43f10fd6e2bc..b90bdc7120444 100644 --- a/integration-cli/docker_api_inspect_test.go +++ b/integration-cli/docker_api_inspect_test.go @@ -2,18 +2,18 @@ package main import ( "encoding/json" + "net/http" "os/exec" "strings" - "testing" -) -func TestInspectApiContainerResponse(t *testing.T) { - defer deleteAllContainers() + "github.com/go-check/check" +) +func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %s, %v", out, err) + c.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -27,14 +27,13 @@ func TestInspectApiContainerResponse(t *testing.T) { if testVersion != "latest" { endpoint = "/" + testVersion + endpoint } - body, err := sockRequest("GET", endpoint, nil) - if err != nil { - t.Fatalf("sockRequest failed for %s version: %v", testVersion, err) - } + status, body, err := sockRequest("GET", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) var inspectJSON map[string]interface{} if err = json.Unmarshal(body, &inspectJSON); err != nil { - t.Fatalf("unable to unmarshal body for %s version: %v", testVersion, err) + c.Fatalf("unable to unmarshal body for 
%s version: %v", testVersion, err) } keys := []string{"State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "ExecDriver", "MountLabel", "ProcessLabel", "Volumes", "VolumesRW"} @@ -47,14 +46,12 @@ func TestInspectApiContainerResponse(t *testing.T) { for _, key := range keys { if _, ok := inspectJSON[key]; !ok { - t.Fatalf("%s does not exist in response for %s version", key, testVersion) + c.Fatalf("%s does not exist in response for %s version", key, testVersion) } } //Issue #6830: type not properly converted to JSON/back if _, ok := inspectJSON["Path"].(bool); ok { - t.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") + c.Fatalf("Path of `true` should not be converted to boolean `true` via JSON marshalling") } } - - logDone("container json - check keys in container json response") } diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go new file mode 100644 index 0000000000000..f9284494d2e88 --- /dev/null +++ b/integration-cli/docker_api_logs_test.go @@ -0,0 +1,62 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "net/http" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" +) + +func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + type logOut struct { + out string + res *http.Response + err error + } + chLog := make(chan logOut) + + go func() { + res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") + out, _ := bufio.NewReader(body).ReadString('\n') + chLog <- logOut{strings.TrimSpace(out), res, err} + }() + + select { + case l := <-chLog: + c.Assert(l.err, check.IsNil) + c.Assert(l.res.StatusCode, 
check.Equals, http.StatusOK) + if !strings.HasSuffix(l.out, "hello") { + c.Fatalf("expected log output to container 'hello', but it does not") + } + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for logs to exit") + } +} + +func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) { + name := "logs_test" + runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) + c.Assert(status, check.Equals, http.StatusBadRequest) + c.Assert(err, check.IsNil) + + expected := "Bad parameters: you must choose at least one stream" + if !bytes.Contains(body, []byte(expected)) { + c.Fatalf("Expected %s, got %s", expected, string(body[:])) + } +} diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go index 2e7677d100c4d..6d5528069952b 100644 --- a/integration-cli/docker_api_resize_test.go +++ b/integration-cli/docker_api_resize_test.go @@ -1,53 +1,62 @@ package main import ( + "net/http" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestResizeApiResponse(t *testing.T) { +func (s *DockerSuite) TestResizeApiResponse(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } - defer deleteAllContainers() cleanedContainerID := strings.TrimSpace(out) endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - _, err = sockRequest("POST", endpoint, nil) + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) +} + +func (s *DockerSuite) TestResizeApiHeightWidthNoInt(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") + out, _, err := 
runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("resize Request failed %v", err) + c.Fatalf(out, err) } + cleanedContainerID := strings.TrimSpace(out) - logDone("container resize - when started") + endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" + status, _, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) } -func TestResizeApiResponseWhenContainerNotStarted(t *testing.T) { +func (s *DockerSuite) TestResizeApiResponseWhenContainerNotStarted(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } - defer deleteAllContainers() cleanedContainerID := strings.TrimSpace(out) - // make sure the exited cintainer is not running + // make sure the exited container is not running runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - body, err := sockRequest("POST", endpoint, nil) - if err == nil { - t.Fatalf("resize should fail when container is not started") - } + status, body, err := sockRequest("POST", endpoint, nil) + c.Assert(status, check.Equals, http.StatusInternalServerError) + c.Assert(err, check.IsNil) + if !strings.Contains(string(body), "Cannot resize container") && !strings.Contains(string(body), cleanedContainerID) { - t.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body)) + c.Fatalf("resize should fail with message 'Cannot resize container' but instead received %s", string(body)) } - - logDone("container resize - when not started should not resize") } diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go new file mode 100644 index 
0000000000000..b756794c265ee --- /dev/null +++ b/integration-cli/docker_api_version_test.go @@ -0,0 +1,25 @@ +package main + +import ( + "encoding/json" + "net/http" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/autogen/dockerversion" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestGetVersion(c *check.C) { + status, body, err := sockRequest("GET", "/version", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) + + var v types.Version + if err := json.Unmarshal(body, &v); err != nil { + c.Fatal(err) + } + + if v.Version != dockerversion.VERSION { + c.Fatal("Version mismatch") + } +} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go index cf21cda5883f6..fc2ea1a1d1ed7 100644 --- a/integration-cli/docker_cli_attach_test.go +++ b/integration-cli/docker_cli_attach_test.go @@ -1,18 +1,20 @@ package main import ( + "bufio" + "fmt" "io" "os/exec" "strings" "sync" - "testing" "time" + + "github.com/go-check/check" ) const attachWait = 5 * time.Second -func TestAttachMultipleAndRestart(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { endGroup := &sync.WaitGroup{} startGroup := &sync.WaitGroup{} @@ -20,7 +22,7 @@ func TestAttachMultipleAndRestart(t *testing.T) { startGroup.Add(3) if err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done"); err != nil { - t.Fatal(err) + c.Fatal(err) } startDone := make(chan struct{}) @@ -38,32 +40,32 @@ func TestAttachMultipleAndRestart(t *testing.T) { for i := 0; i < 3; i++ { go func() { - c := exec.Command(dockerBinary, "attach", "attacher") + cmd := exec.Command(dockerBinary, "attach", "attacher") defer func() { - c.Wait() + cmd.Wait() endGroup.Done() }() - out, err := c.StdoutPipe() + out, err := cmd.StdoutPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } - if err := c.Start(); err != nil { - 
t.Fatal(err) + if err := cmd.Start(); err != nil { + c.Fatal(err) } buf := make([]byte, 1024) if _, err := out.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) + c.Fatal(err) } startGroup.Done() if !strings.Contains(string(buf), "hello") { - t.Fatalf("unexpected output %s expected hello\n", string(buf)) + c.Fatalf("unexpected output %s expected hello\n", string(buf)) } }() } @@ -71,66 +73,111 @@ func TestAttachMultipleAndRestart(t *testing.T) { select { case <-startDone: case <-time.After(attachWait): - t.Fatalf("Attaches did not initialize properly") + c.Fatalf("Attaches did not initialize properly") } cmd := exec.Command(dockerBinary, "kill", "attacher") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } select { case <-endDone: case <-time.After(attachWait): - t.Fatalf("Attaches did not finish properly") + c.Fatalf("Attaches did not finish properly") } - logDone("attach - multiple attach") } -func TestAttachTtyWithoutStdin(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestAttachTtyWithoutStdin(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to start container: %v (%v)", out, err) + c.Fatalf("failed to start container: %v (%v)", out, err) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { - t.Fatal(err) + c.Fatal(err) } defer func() { cmd := exec.Command(dockerBinary, "kill", id) if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatalf("failed to kill container: %v (%v)", out, err) + c.Fatalf("failed to kill container: %v (%v)", out, err) } }() - done := make(chan struct{}) + done := make(chan error) go func() { defer close(done) cmd := exec.Command(dockerBinary, "attach", id) if _, err := cmd.StdinPipe(); err != nil { - t.Fatal(err) + done <- err + return } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { - t.Fatal("attach should 
have failed") + done <- fmt.Errorf("attach should have failed") + return } else if !strings.Contains(out, expected) { - t.Fatalf("attach failed with error %q: expected %q", out, expected) + done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) + return } }() select { - case <-done: + case err := <-done: + c.Assert(err, check.IsNil) case <-time.After(attachWait): - t.Fatal("attach is running but should have failed") + c.Fatal("attach is running but should have failed") + } +} + +func (s *DockerSuite) TestAttachDisconnect(c *check.C) { + out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") + id := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "attach", id) + stdin, err := cmd.StdinPipe() + if err != nil { + c.Fatal(err) + } + defer stdin.Close() + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + defer cmd.Process.Kill() + + if _, err := stdin.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + if err := stdin.Close(); err != nil { + c.Fatal(err) + } + + // Expect container to still be running after stdin is closed + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") } - logDone("attach - forbid piped stdin to tty enabled container") } diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go index 8d15735120cb8..82808a5b087f1 100644 --- a/integration-cli/docker_cli_attach_unix_test.go +++ b/integration-cli/docker_cli_attach_unix_test.go @@ -3,37 +3,38 @@ package main import ( + "bufio" "os/exec" "strings" - "testing" "time" + "github.com/docker/docker/pkg/stringid" + 
"github.com/go-check/check" "github.com/kr/pty" ) // #9860 -func TestAttachClosedOnContainerStop(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-dti", "busybox", "sleep", "2") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to start container: %v (%v)", out, err) + c.Fatalf("failed to start container: %v (%v)", out, err) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { - t.Fatal(err) + c.Fatal(err) } - done := make(chan struct{}) - + errChan := make(chan error) go func() { - defer close(done) + defer close(errChan) _, tty, err := pty.Open() if err != nil { - t.Fatalf("could not open pty: %v", err) + errChan <- err + return } attachCmd := exec.Command(dockerBinary, "attach", id) attachCmd.Stdin = tty @@ -41,58 +42,61 @@ func TestAttachClosedOnContainerStop(t *testing.T) { attachCmd.Stderr = tty if err := attachCmd.Run(); err != nil { - t.Fatalf("attach returned error %s", err) + errChan <- err + return } }() waitCmd := exec.Command(dockerBinary, "wait", id) if out, _, err = runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + c.Fatalf("error thrown while waiting for container: %s, %v", out, err) } select { - case <-done: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(attachWait): - t.Fatal("timed out without attach returning") + c.Fatal("timed out without attach returning") } - logDone("attach - return after container finished") } -func TestAttachAfterDetach(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { name := "detachtest" cpty, tty, err := pty.Open() if err != nil { - t.Fatalf("Could not open pty: %v", err) + c.Fatalf("Could not open pty: %v", err) } cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") cmd.Stdin = tty cmd.Stdout = tty 
cmd.Stderr = tty - detached := make(chan struct{}) + errChan := make(chan error) go func() { - if err := cmd.Run(); err != nil { - t.Fatalf("attach returned error %s", err) - } - close(detached) + errChan <- cmd.Run() + close(errChan) }() time.Sleep(500 * time.Millisecond) if err := waitRun(name); err != nil { - t.Fatal(err) + c.Fatal(err) } cpty.Write([]byte{16}) time.Sleep(100 * time.Millisecond) cpty.Write([]byte{17}) - <-detached + select { + case err := <-errChan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("timeout while detaching") + } cpty, tty, err = pty.Open() if err != nil { - t.Fatalf("Could not open pty: %v", err) + c.Fatalf("Could not open pty: %v", err) } cmd = exec.Command(dockerBinary, "attach", name) @@ -101,7 +105,7 @@ func TestAttachAfterDetach(t *testing.T) { cmd.Stderr = tty if err := cmd.Start(); err != nil { - t.Fatal(err) + c.Fatal(err) } bytes := make([]byte, 10) @@ -120,20 +124,162 @@ func TestAttachAfterDetach(t *testing.T) { select { case err := <-readErr: - if err != nil { - t.Fatal(err) - } + c.Assert(err, check.IsNil) case <-time.After(2 * time.Second): - t.Fatal("timeout waiting for attach read") + c.Fatal("timeout waiting for attach read") } if err := cmd.Wait(); err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(string(bytes[:nBytes]), "/ #") { - t.Fatalf("failed to get a new prompt. got %s", string(bytes[:nBytes])) + c.Fatalf("failed to get a new prompt. 
got %s", string(bytes[:nBytes])) + } + +} + +// TestAttachDetach checks that attach in tty mode can be detached using the long container ID +func (s *DockerSuite) TestAttachDetach(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := strings.TrimSpace(out) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + if err := waitRun(id); err != nil { + c.Fatalf("error waiting for container to start: %v", err) + } + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } + +} + +// TestAttachDetachTruncatedID checks that attach in tty mode can be detached +func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { + out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") + id := stringid.TruncateID(strings.TrimSpace(out)) + if err := waitRun(id); err != nil { + c.Fatal(err) + } + + cpty, tty, err := pty.Open() + 
if err != nil { + c.Fatal(err) + } + defer cpty.Close() + + cmd := exec.Command(dockerBinary, "attach", id) + cmd.Stdin = tty + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + defer stdout.Close() + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + out, err = bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(id, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + dockerCmd(c, "kill", id) + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") } - logDone("attach - reconnect after detaching") } diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 01e6d1d3d68d6..b74dce2cfa906 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -15,19 +15,17 @@ import ( "runtime" "strconv" "strings" - "sync" - "testing" "text/template" "time" "github.com/docker/docker/builder/command" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" ) -func TestBuildJSONEmptyRun(t *testing.T) { +func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { name := "testbuildjsonemptyrun" - defer deleteImages(name) _, err := buildImage( name, @@ -38,15 +36,13 @@ func TestBuildJSONEmptyRun(t *testing.T) { true) if err != nil { - 
t.Fatal("error when dealing with a RUN statement with empty JSON array") + c.Fatal("error when dealing with a RUN statement with empty JSON array") } - logDone("build - RUN with an empty array should not panic") } -func TestBuildEmptyWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildEmptyWhitespace(c *check.C) { name := "testbuildemptywhitespace" - defer deleteImages(name) _, err := buildImage( name, @@ -59,15 +55,13 @@ func TestBuildEmptyWhitespace(t *testing.T) { true) if err == nil { - t.Fatal("no error when dealing with a COPY statement with no content on the same line") + c.Fatal("no error when dealing with a COPY statement with no content on the same line") } - logDone("build - statements with whitespace and no content should generate a parse error") } -func TestBuildShCmdJSONEntrypoint(t *testing.T) { +func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { name := "testbuildshcmdjsonentrypoint" - defer deleteImages(name) _, err := buildImage( name, @@ -79,7 +73,7 @@ func TestBuildShCmdJSONEntrypoint(t *testing.T) { true) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput( @@ -90,19 +84,17 @@ func TestBuildShCmdJSONEntrypoint(t *testing.T) { name)) if err != nil { - t.Fatal(err) + c.Fatal(err) } if strings.TrimSpace(out) != "/bin/sh -c echo test" { - t.Fatal("CMD did not contain /bin/sh -c") + c.Fatal("CMD did not contain /bin/sh -c") } - logDone("build - CMD should always contain /bin/sh -c when specified without JSON") } -func TestBuildEnvironmentReplacementUser(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) _, err := buildImage(name, ` FROM scratch @@ -110,24 +102,22 @@ func TestBuildEnvironmentReplacementUser(t *testing.T) { USER ${user} `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.User") if err != nil { - t.Fatal(err) + c.Fatal(err) } if 
res != `"foo"` { - t.Fatal("User foo from environment not in Config.User on image") + c.Fatal("User foo from environment not in Config.User on image") } - logDone("build - user environment replacement") } -func TestBuildEnvironmentReplacementVolume(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) _, err := buildImage(name, ` FROM scratch @@ -135,30 +125,28 @@ func TestBuildEnvironmentReplacementVolume(t *testing.T) { VOLUME ${volume} `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { - t.Fatal(err) + c.Fatal(err) } var volumes map[string]interface{} if err := json.Unmarshal([]byte(res), &volumes); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, ok := volumes["/quux"]; !ok { - t.Fatal("Volume /quux from environment not in Config.Volumes on image") + c.Fatal("Volume /quux from environment not in Config.Volumes on image") } - logDone("build - volume environment replacement") } -func TestBuildEnvironmentReplacementExpose(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) _, err := buildImage(name, ` FROM scratch @@ -166,30 +154,28 @@ func TestBuildEnvironmentReplacementExpose(t *testing.T) { EXPOSE ${port} `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, ok := exposedPorts["80/tcp"]; !ok { - t.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") + c.Fatal("Exposed port 80 from environment not in Config.ExposedPorts on image") } - logDone("build - expose environment replacement") } -func 
TestBuildEnvironmentReplacementWorkdir(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) _, err := buildImage(name, ` FROM busybox @@ -199,15 +185,13 @@ func TestBuildEnvironmentReplacementWorkdir(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - workdir environment replacement") } -func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) ctx, err := fakeContext(` FROM scratch @@ -230,22 +214,19 @@ func TestBuildEnvironmentReplacementAddCopy(t *testing.T) { }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add/copy environment replacement") } -func TestBuildEnvironmentReplacementEnv(t *testing.T) { +func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { name := "testbuildenvironmentreplacement" - defer deleteImages(name) - _, err := buildImage(name, ` FROM busybox @@ -263,18 +244,18 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Env") if err != nil { - t.Fatal(err) + c.Fatal(err) } envResult := []string{} if err = unmarshalJSON([]byte(res), &envResult); err != nil { - t.Fatal(err) + c.Fatal(err) } found := false @@ -285,37 +266,34 @@ func TestBuildEnvironmentReplacementEnv(t *testing.T) { if parts[0] == "bar" { found = true if parts[1] != "zzz" { - t.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) + c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) } } else if strings.HasPrefix(parts[0], "env") { envCount++ if parts[1] != "zzz" { - 
t.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) } } else if strings.HasPrefix(parts[0], "env") { envCount++ if parts[1] != "foo" { - t.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) + c.Fatalf("%s should be 'foo' but instead its %q", parts[0], parts[1]) } } } if !found { - t.Fatal("Never found the `bar` env variable") + c.Fatal("Never found the `bar` env variable") } if envCount != 4 { - t.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) + c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) } - logDone("build - env environment replacement") } -func TestBuildHandleEscapes(t *testing.T) { +func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { name := "testbuildhandleescapes" - defer deleteImages(name) - _, err := buildImage(name, ` FROM scratch @@ -324,22 +302,22 @@ func TestBuildHandleEscapes(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } var result map[string]map[string]struct{} res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { - t.Fatal(err) + c.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, ok := result["bar"]; !ok { - t.Fatal("Could not find volume bar set from env foo in volumes table") + c.Fatal("Could not find volume bar set from env foo in volumes table") } deleteImages(name) @@ -352,20 +330,20 @@ func TestBuildHandleEscapes(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err = inspectFieldJSON(name, "Config.Volumes") if err != nil { - t.Fatal(err) + c.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, ok := result["${FOO}"]; !ok { - t.Fatal("Could not find volume ${FOO} set from env foo in volumes table") + c.Fatal("Could not find volume ${FOO} set from env foo in volumes table") } 
deleteImages(name) @@ -382,31 +360,28 @@ func TestBuildHandleEscapes(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err = inspectFieldJSON(name, "Config.Volumes") if err != nil { - t.Fatal(err) + c.Fatal(err) } if err = unmarshalJSON([]byte(res), &result); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, ok := result[`\\\${FOO}`]; !ok { - t.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) + c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) } - logDone("build - handle escapes") } -func TestBuildOnBuildLowercase(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { name := "testbuildonbuildlowercase" name2 := "testbuildonbuildlowercase2" - defer deleteImages(name, name2) - _, err := buildImage(name, ` FROM busybox @@ -414,7 +389,7 @@ func TestBuildOnBuildLowercase(t *testing.T) { `, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } _, out, err := buildImageWithOut(name2, fmt.Sprintf(` @@ -422,24 +397,21 @@ func TestBuildOnBuildLowercase(t *testing.T) { `, name), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "quux") { - t.Fatalf("Did not receive the expected echo text, got %s", out) + c.Fatalf("Did not receive the expected echo text, got %s", out) } if strings.Contains(out, "ONBUILD ONBUILD") { - t.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) + c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) } - logDone("build - handle case-insensitive onbuild statement") } -func TestBuildEnvEscapes(t *testing.T) { +func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { name := "testbuildenvescapes" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, ` FROM busybox @@ -451,20 +423,17 @@ func TestBuildEnvEscapes(t *testing.T) { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name)) if err != nil { - t.Fatal(err) + 
c.Fatal(err) } if strings.TrimSpace(out) != "$" { - t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } - logDone("build - env should handle \\$ properly") } -func TestBuildEnvOverwrite(t *testing.T) { +func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { name := "testbuildenvoverwrite" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, ` @@ -475,32 +444,28 @@ func TestBuildEnvOverwrite(t *testing.T) { true) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-e", "TEST=bar", "-t", name)) if err != nil { - t.Fatal(err) + c.Fatal(err) } if strings.TrimSpace(out) != "bar" { - t.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) + c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) } - logDone("build - env should overwrite builder ENV during run") } -func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) { name := "testbuildonbuildforbiddenmaintainerinsourceimage" - defer deleteImages("onbuild") - defer deleteImages(name) - defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -508,7 +473,7 @@ func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { - 
t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name, @@ -516,25 +481,21 @@ func TestBuildOnBuildForbiddenMaintainerInSourceImage(t *testing.T) { true) if err != nil { if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { - t.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) + c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden maintainer in source image") } -func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) { name := "testbuildonbuildforbiddenfrominsourceimage" - defer deleteImages("onbuild") - defer deleteImages(name) - defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -542,7 +503,7 @@ func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name, @@ -550,25 +511,21 @@ func TestBuildOnBuildForbiddenFromInSourceImage(t *testing.T) { true) if err != nil { if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { - t.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) + c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden from in source image") } -func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { +func (s 
*DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) { name := "testbuildonbuildforbiddenchainedinsourceimage" - defer deleteImages("onbuild") - defer deleteImages(name) - defer deleteAllContainers() createCmd := exec.Command(dockerBinary, "create", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(createCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -576,7 +533,7 @@ func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { commitCmd := exec.Command(dockerBinary, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") if _, err := runCommand(commitCmd); err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name, @@ -584,23 +541,18 @@ func TestBuildOnBuildForbiddenChainedInSourceImage(t *testing.T) { true) if err != nil { if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { - t.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) + c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden chained in source image") } -func TestBuildOnBuildCmdEntrypointJSON(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteImages(name2) - defer deleteImages(name1) - defer deleteAllContainers() - _, err := buildImage(name1, ` FROM busybox ONBUILD CMD ["hello world"] @@ -609,71 +561,64 @@ ONBUILD RUN ["true"]`, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !regexp.MustCompile(`(?m)^hello 
world`).MatchString(out) { - t.Fatal("did not get echo output from onbuild", out) + c.Fatal("did not get echo output from onbuild", out) } - logDone("build - onbuild with json entrypoint/cmd") } -func TestBuildOnBuildEntrypointJSON(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { name1 := "onbuildcmd" name2 := "onbuildgenerated" - defer deleteImages(name2) - defer deleteImages(name1) - defer deleteAllContainers() - _, err := buildImage(name1, ` FROM busybox ONBUILD ENTRYPOINT ["echo"]`, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { - t.Fatal("got malformed output from onbuild", out) + c.Fatal("got malformed output from onbuild", out) } - logDone("build - onbuild with json entrypoint") } -func TestBuildCacheADD(t *testing.T) { +func (s *DockerSuite) TestBuildCacheADD(c *check.C) { name := "testbuildtwoimageswithadd" - defer deleteImages(name) server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -681,10 +626,10 @@ func TestBuildCacheADD(t *testing.T) { fmt.Sprintf(`FROM scratch ADD %s/robots.txt /`, server.URL()), true); err != nil { - t.Fatal(err) + c.Fatal(err) } if err != nil { - t.Fatal(err) + c.Fatal(err) } deleteImages(name) _, out, err := buildImageWithOut(name, @@ -692,24 +637,22 @@ func TestBuildCacheADD(t *testing.T) { ADD %s/index.html /`, server.URL()), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if strings.Contains(out, "Using cache") { - t.Fatal("2nd build used cache on ADD, it shouldn't") + c.Fatal("2nd build used cache on ADD, it shouldn't") } - logDone("build 
- build two images with remote ADD") } -func TestBuildLastModified(t *testing.T) { +func (s *DockerSuite) TestBuildLastModified(c *check.C) { name := "testbuildlastmodified" - defer deleteImages(name) server, err := fakeStorage(map[string]string{ "file": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -722,13 +665,13 @@ RUN ls -le /file` dockerfile := fmt.Sprintf(dFmt, server.URL()) if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil { - t.Fatal(err) + c.Fatal(err) } originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out) // Make sure our regexp is correct if strings.Index(originMTime, "/file") < 0 { - t.Fatalf("Missing ls info on 'file':\n%s", out) + c.Fatalf("Missing ls info on 'file':\n%s", out) } // Build it again and make sure the mtime of the file didn't change. @@ -736,12 +679,12 @@ RUN ls -le /file` time.Sleep(2 * time.Second) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { - t.Fatal(err) + c.Fatal(err) } newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime != originMTime { - t.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) + c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) } // Now 'touch' the file and make sure the timestamp DID change this time @@ -750,45 +693,41 @@ RUN ls -le /file` "file": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { - t.Fatal(err) + c.Fatal(err) } newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime == originMTime { - t.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) + c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) } - logDone("build - use Last-Modified header") } -func TestBuildSixtySteps(t *testing.T) { +func (s *DockerSuite) 
TestBuildSixtySteps(c *check.C) { name := "foobuildsixtysteps" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\n"+strings.Repeat("ADD foo /\n", 60), map[string]string{ "foo": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - build an image with sixty build steps") } -func TestBuildAddSingleFileToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { name := "testaddimg" - defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -802,48 +741,44 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expecte "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add single file to root") } // Issue #3960: "ADD src ." 
hangs -func TestBuildAddSingleFileToWorkdir(t *testing.T) { +func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { name := "testaddsinglefiletoworkdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() - done := make(chan struct{}) + errChan := make(chan error) go func() { - if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) - } - close(done) + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) }() select { case <-time.After(5 * time.Second): - t.Fatal("Build with adding to workdir timed out") - case <-done: + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) } - logDone("build - add single file to workdir") } -func TestBuildAddSingleFileToExistDir(t *testing.T) { +func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { name := "testaddsinglefiletoexistdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -858,27 +793,25 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add single file to existing dir") } -func TestBuildCopyAddMultipleFiles(t *testing.T) { +func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { server, err := fakeStorage(map[string]string{ "robots.txt": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() name := "testcopymultiplefilestofile" - defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> 
/etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -905,18 +838,16 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - multiple file copy/add tests") } -func TestBuildAddMultipleFilesToFile(t *testing.T) { +func (s *DockerSuite) TestBuildAddMultipleFilesToFile(c *check.C) { name := "testaddmultiplefilestofile" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD file1.txt file2.txt test `, @@ -926,20 +857,18 @@ func TestBuildAddMultipleFilesToFile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple add files to file") } -func TestBuildJSONAddMultipleFilesToFile(t *testing.T) { +func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFile(c *check.C) { name := "testjsonaddmultiplefilestofile" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ["file1.txt", "file2.txt", "test"] `, @@ -949,20 +878,18 @@ func TestBuildJSONAddMultipleFilesToFile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - 
logDone("build - multiple add files to file json syntax") } -func TestBuildAddMultipleFilesToFileWild(t *testing.T) { +func (s *DockerSuite) TestBuildAddMultipleFilesToFileWild(c *check.C) { name := "testaddmultiplefilestofilewild" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD file*.txt test `, @@ -972,20 +899,18 @@ func TestBuildAddMultipleFilesToFileWild(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple add files to file wild") } -func TestBuildJSONAddMultipleFilesToFileWild(t *testing.T) { +func (s *DockerSuite) TestBuildJSONAddMultipleFilesToFileWild(c *check.C) { name := "testjsonaddmultiplefilestofilewild" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ["file*.txt", "test"] `, @@ -995,20 +920,18 @@ func TestBuildJSONAddMultipleFilesToFileWild(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple add files to file wild json syntax") } -func TestBuildCopyMultipleFilesToFile(t *testing.T) { +func (s *DockerSuite) TestBuildCopyMultipleFilesToFile(c *check.C) { name := "testcopymultiplefilestofile" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY file1.txt 
file2.txt test `, @@ -1018,20 +941,18 @@ func TestBuildCopyMultipleFilesToFile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple copy files to file") } -func TestBuildJSONCopyMultipleFilesToFile(t *testing.T) { +func (s *DockerSuite) TestBuildJSONCopyMultipleFilesToFile(c *check.C) { name := "testjsoncopymultiplefilestofile" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY ["file1.txt", "file2.txt", "test"] `, @@ -1041,20 +962,18 @@ func TestBuildJSONCopyMultipleFilesToFile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using COPY with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple copy files to file json syntax") } -func TestBuildAddFileWithWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { name := "testaddfilewithwhitespace" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" @@ -1080,18 +999,16 @@ RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add file with 
whitespace") } -func TestBuildCopyFileWithWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { name := "testcopyfilewithwhitespace" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" @@ -1117,18 +1034,16 @@ RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy file with whitespace") } -func TestBuildAddMultipleFilesToFileWithWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildAddMultipleFilesToFileWithWhitespace(c *check.C) { name := "testaddmultiplefilestofilewithwhitespace" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD [ "test file1", "test file2", "test" ] `, @@ -1138,20 +1053,18 @@ func TestBuildAddMultipleFilesToFileWithWhitespace(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using ADD with more than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple add files to file with whitespace") } -func TestBuildCopyMultipleFilesToFileWithWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildCopyMultipleFilesToFileWithWhitespace(c *check.C) { name := "testcopymultiplefilestofilewithwhitespace" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY [ "test file1", "test file2", "test" ] `, @@ -1161,26 +1074,24 @@ func TestBuildCopyMultipleFilesToFileWithWhitespace(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "When using COPY with more 
than one source file, the destination must be a directory and end with a /" if _, err := buildImageFromContext(name, ctx, true); err == nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain %q) got:\n%v", expected, err) } - logDone("build - multiple copy files to file with whitespace") } -func TestBuildCopyWildcard(t *testing.T) { +func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { name := "testcopywildcard" - defer deleteImages(name) server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -1203,52 +1114,48 @@ func TestBuildCopyWildcard(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Now make sure we use a cache the 2nd time id2, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("didn't use the cache") + c.Fatal("didn't use the cache") } - logDone("build - copy wild card") } -func TestBuildCopyWildcardNoFind(t *testing.T) { +func (s *DockerSuite) TestBuildCopyWildcardNoFind(c *check.C) { name := "testcopywildcardnofind" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY file*.txt /tmp/ `, nil) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImageFromContext(name, ctx, true) if err == nil { - t.Fatal("should have failed to find a file") + c.Fatal("should have failed to find a file") } if !strings.Contains(err.Error(), "No source files were specified") { - t.Fatalf("Wrong error %v, must be about no source files", err) + c.Fatalf("Wrong error %v, must be about no source files", err) } - logDone("build - copy wild card no find") } -func TestBuildCopyWildcardCache(t *testing.T) { +func (s 
*DockerSuite) TestBuildCopyWildcardCache(c *check.C) { name := "testcopywildcardcache" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY file1.txt /tmp/`, map[string]string{ @@ -1256,12 +1163,12 @@ func TestBuildCopyWildcardCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Now make sure we use a cache the 2nd time even with wild cards. @@ -1271,19 +1178,17 @@ func TestBuildCopyWildcardCache(t *testing.T) { id2, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("didn't use the cache") + c.Fatal("didn't use the cache") } - logDone("build - copy wild card cache") } -func TestBuildAddSingleFileToNonExistingDir(t *testing.T) { +func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { name := "testaddsinglefiletononexistingdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1297,20 +1202,18 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add single file to non-existing dir") } -func TestBuildAddDirContentToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { name := "testadddircontenttoroot" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1323,19 +1226,17 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } 
defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add directory contents to root") } -func TestBuildAddDirContentToExistingDir(t *testing.T) { +func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { name := "testadddircontenttoexistingdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1350,19 +1251,17 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add directory contents to existing dir") } -func TestBuildAddWholeDirToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { name := "testaddwholedirtoroot" - defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1378,40 +1277,36 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expecte "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add whole directory to root") } // Testing #5941 -func TestBuildAddEtcToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { name := "testaddetctoroot" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD . 
/`, map[string]string{ "etc/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add etc directory to root") } // Testing #9401 -func TestBuildAddPreservesFilesSpecialBits(t *testing.T) { +func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { name := "testaddpreservesfilesspecialbits" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin @@ -1423,19 +1318,17 @@ RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, "/data/usr/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add preserves files special bits") } -func TestBuildCopySingleFileToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { name := "testcopysinglefiletoroot" - defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1449,48 +1342,44 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expecte "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy single file to root") } // Issue #3960: "ADD src ." 
hangs - adapted for COPY -func TestBuildCopySingleFileToWorkdir(t *testing.T) { +func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { name := "testcopysinglefiletoworkdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox COPY test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() - done := make(chan struct{}) + errChan := make(chan error) go func() { - if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) - } - close(done) + _, err := buildImageFromContext(name, ctx, true) + errChan <- err + close(errChan) }() select { case <-time.After(5 * time.Second): - t.Fatal("Build with adding to workdir timed out") - case <-done: + c.Fatal("Build with adding to workdir timed out") + case err := <-errChan: + c.Assert(err, check.IsNil) } - logDone("build - copy single file to workdir") } -func TestBuildCopySingleFileToExistDir(t *testing.T) { +func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { name := "testcopysinglefiletoexistdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1505,19 +1394,17 @@ RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy single file to existing dir") } -func TestBuildCopySingleFileToNonExistDir(t *testing.T) { +func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { name := "testcopysinglefiletononexistdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1531,19 +1418,17 @@ RUN [ $(ls -l /exists | awk 
'{print $3":"$4}') = 'dockerio:dockerio' ]`, "test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy single file to non-existing dir") } -func TestBuildCopyDirContentToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { name := "testcopydircontenttoroot" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1556,19 +1441,17 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy directory contents to root") } -func TestBuildCopyDirContentToExistDir(t *testing.T) { +func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { name := "testcopydircontenttoexistdir" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ -1583,19 +1466,17 @@ RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy directory contents to existing dir") } -func TestBuildCopyWholeDirToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { name := "testcopywholedirtoroot" - defer deleteImages(name) ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group @@ 
-1611,48 +1492,43 @@ RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expecte "test_dir/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy whole directory to root") } -func TestBuildCopyEtcToRoot(t *testing.T) { +func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { name := "testcopyetctoroot" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch COPY . /`, map[string]string{ "etc/test_file": "test1", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - copy etc directory to root") } -func TestBuildCopyDisallowRemote(t *testing.T) { +func (s *DockerSuite) TestBuildCopyDisallowRemote(c *check.C) { name := "testcopydisallowremote" - defer deleteImages(name) _, out, err := buildImageWithOut(name, `FROM scratch COPY https://index.docker.io/robots.txt /`, true) if err == nil || !strings.Contains(out, "Source can't be a URL for COPY") { - t.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) + c.Fatalf("Error should be about disallowed remote source, got err: %s, out: %q", err, out) } - logDone("build - copy - disallow copy from remote") } -func TestBuildAddBadLinks(t *testing.T) { +func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { const ( dockerfile = ` FROM scratch @@ -1664,16 +1540,15 @@ func TestBuildAddBadLinks(t *testing.T) { var ( name = "test-link-absolute" ) - defer deleteImages(name) ctx, err := fakeContext(dockerfile, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") if err != nil { - t.Fatalf("failed to create temporary directory: %s", tempDir) + c.Fatalf("failed to create temporary directory: %s", 
tempDir) } defer os.RemoveAll(tempDir) @@ -1681,7 +1556,7 @@ func TestBuildAddBadLinks(t *testing.T) { if runtime.GOOS == "windows" { var driveLetter string if abs, err := filepath.Abs(tempDir); err != nil { - t.Fatal(err) + c.Fatal(err) } else { driveLetter = abs[:1] } @@ -1697,7 +1572,7 @@ func TestBuildAddBadLinks(t *testing.T) { tarOut, err := os.Create(tarPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } tarWriter := tar.NewWriter(tarOut) @@ -1713,7 +1588,7 @@ func TestBuildAddBadLinks(t *testing.T) { err = tarWriter.WriteHeader(header) if err != nil { - t.Fatal(err) + c.Fatal(err) } tarWriter.Close() @@ -1721,26 +1596,25 @@ func TestBuildAddBadLinks(t *testing.T) { foo, err := os.Create(fooPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { - t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } - logDone("build - ADD must add files in container") } -func TestBuildAddBadLinksVolume(t *testing.T) { +func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { const ( dockerfileTemplate = ` FROM busybox @@ -1753,11 +1627,10 @@ func TestBuildAddBadLinksVolume(t *testing.T) { name = "test-link-absolute-volume" dockerfile = "" ) - defer deleteImages(name) tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") if err != nil { - t.Fatalf("failed to create temporary directory: %s", tempDir) + c.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) @@ -1766,76 +1639,73 @@ func TestBuildAddBadLinksVolume(t *testing.T) { ctx, err := fakeContext(dockerfile, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer 
ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) foo, err := os.Create(fooPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { - t.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) + c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } - logDone("build - ADD should add files in volume") } // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. -func TestBuildWithInaccessibleFilesInContext(t *testing.T) { - testRequires(t, UnixCli) // test uses chown/chmod: not available on windows +func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { + testRequires(c, UnixCli) // test uses chown/chmod: not available on windows { name := "testbuildinaccessiblefiles" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"fileWithoutReadAccess": "foo"}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { - t.Fatalf("failed to chown file to root: %s", err) + c.Fatalf("failed to chown file to root: %s", err) } if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { - t.Fatalf("failed to chmod file to 700: %s", err) + c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { - t.Fatalf("build should have failed: %s %s", err, out) + c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "no permission to read from ") { - t.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) + c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) } if !strings.Contains(out, "Error checking context is accessible") { - t.Fatalf("output should've contained the string: Error checking context is accessible") + c.Fatalf("output should've contained the string: Error checking context is accessible") } } { name := "testbuildinaccessibledirectory" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client @@ -1843,116 +1713,111 @@ func TestBuildWithInaccessibleFilesInContext(t *testing.T) { pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { - t.Fatalf("failed to chown directory to root: %s", err) + c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { - t.Fatalf("failed to chmod directory to 444: %s", err) + c.Fatalf("failed to chmod directory to 444: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { - t.Fatalf("failed to chmod file to 700: %s", err) + c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { - t.Fatalf("build should have failed: %s %s", err, out) + c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "can't stat") { - t.Fatalf("output should've contained the string: can't access %s", out) + c.Fatalf("output should've contained the string: can't stat %s", out) } if !strings.Contains(out, "Error checking context is accessible") { - t.Fatalf("output should've contained the string: Error checking context is accessible") + c.Fatalf("output should've contained the string: Error checking context is accessible") } } { name := "testlinksok" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } } { name := "testbuildignoredinaccessible" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { - t.Fatalf("failed to chown directory to root: %s", err) + c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { - t.Fatalf("failed to chmod directory to 755: %s", err) + c.Fatalf("failed to chmod directory to 444: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { - t.Fatalf("failed to chmod file to 444: %s", err) + c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err != nil { - t.Fatalf("build should have worked: %s %s", err, out) + c.Fatalf("build should have worked: %s %s", err, out) 
} } - logDone("build - ADD from context with inaccessible files must not pass") } -func TestBuildForceRm(t *testing.T) { +func (s *DockerSuite) TestBuildForceRm(c *check.C) { containerCountBefore, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } name := "testbuildforcerm" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nRUN true\nRUN thiswillfail", nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() buildCmd := exec.Command(dockerBinary, "build", "-t", name, "--force-rm", ".") buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err == nil { - t.Fatalf("failed to build the image: %s, %v", out, err) + c.Fatalf("build should have failed: %s, %v", out, err) } containerCountAfter, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { - t.Fatalf("--force-rm shouldn't have left containers behind") + c.Fatalf("--force-rm shouldn't have left containers behind") } - logDone("build - ensure --force-rm doesn't leave containers behind") } // Test that an infinite sleep during a build is killed if the client disconnects. @@ -1962,18 +1827,13 @@ func TestBuildForceRm(t *testing.T) { // * Run a 1-year-long sleep from a docker build. // * When docker events sees container start, close the "docker build" command // * Wait for docker events to emit a dying event. 
-func TestBuildCancelationKillsSleep(t *testing.T) { - var wg sync.WaitGroup - defer wg.Wait() - +func (s *DockerSuite) TestBuildCancelationKillsSleep(c *check.C) { name := "testbuildcancelation" - defer deleteImages(name) - defer deleteAllContainers() // (Note: one year, will never finish) ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() @@ -1984,27 +1844,22 @@ func TestBuildCancelationKillsSleep(t *testing.T) { eventDie := make(chan struct{}) containerID := make(chan string) - startEpoch := daemonTime(t).Unix() + startEpoch := daemonTime(c).Unix() + // Watch for events since epoch. + eventsCmd := exec.Command( + dockerBinary, "events", + "--since", strconv.FormatInt(startEpoch, 10)) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := eventsCmd.Start(); err != nil { + c.Fatal(err) + } + defer eventsCmd.Process.Kill() - wg.Add(1) // Goroutine responsible for watching start/die events from `docker events` go func() { - defer wg.Done() - // Watch for events since epoch. 
- eventsCmd := exec.Command( - dockerBinary, "events", - "--since", strconv.FormatInt(startEpoch, 10)) - stdout, err := eventsCmd.StdoutPipe() - err = eventsCmd.Start() - if err != nil { - t.Fatalf("failed to start 'docker events': %s", err) - } - - go func() { - <-finish - eventsCmd.Process.Kill() - }() - cid := <-containerID matchStart := regexp.MustCompile(cid + `(.*) start$`) @@ -2022,20 +1877,14 @@ func TestBuildCancelationKillsSleep(t *testing.T) { close(eventDie) } } - - err = eventsCmd.Wait() - if err != nil && !IsKilled(err) { - t.Fatalf("docker events had bad exit status: %s", err) - } }() buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") buildCmd.Dir = ctx.Dir stdoutBuild, err := buildCmd.StdoutPipe() - err = buildCmd.Start() - if err != nil { - t.Fatalf("failed to run build: %s", err) + if err := buildCmd.Start(); err != nil { + c.Fatalf("failed to run build: %s", err) } matchCID := regexp.MustCompile("Running in ") @@ -2050,7 +1899,7 @@ func TestBuildCancelationKillsSleep(t *testing.T) { select { case <-time.After(5 * time.Second): - t.Fatal("failed to observe build container start in timely fashion") + c.Fatal("failed to observe build container start in timely fashion") case <-eventStart: // Proceeds from here when we see the container fly past in the // output of "docker events". @@ -2059,56 +1908,52 @@ func TestBuildCancelationKillsSleep(t *testing.T) { // Send a kill to the `docker build` command. // Causes the underlying build to be cancelled due to socket close. - err = buildCmd.Process.Kill() - if err != nil { - t.Fatalf("error killing build command: %s", err) + if err := buildCmd.Process.Kill(); err != nil { + c.Fatalf("error killing build command: %s", err) } // Get the exit status of `docker build`, check it exited because killed. 
- err = buildCmd.Wait() - if err != nil && !IsKilled(err) { - t.Fatalf("wait failed during build run: %T %s", err, err) + if err := buildCmd.Wait(); err != nil && !IsKilled(err) { + c.Fatalf("wait failed during build run: %T %s", err, err) } select { case <-time.After(5 * time.Second): // If we don't get here in a timely fashion, it wasn't killed. - t.Fatal("container cancel did not succeed") + c.Fatal("container cancel did not succeed") case <-eventDie: // We saw the container shut down in the `docker events` stream, // as expected. } - logDone("build - ensure canceled job finishes immediately") } -func TestBuildRm(t *testing.T) { +func (s *DockerSuite) TestBuildRm(c *check.C) { name := "testbuildrm" - defer deleteImages(name) ctx, err := fakeContext("FROM scratch\nADD foo /\nADD foo /", map[string]string{"foo": "bar"}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() { containerCountBefore, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm", "-t", name, ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") if err != nil { - t.Fatal("failed to build the image", out) + c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { - t.Fatalf("-rm shouldn't have left containers behind") + c.Fatalf("-rm shouldn't have left containers behind") } deleteImages(name) } @@ -2116,22 +1961,22 @@ func TestBuildRm(t *testing.T) { { containerCountBefore, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, 
"build", "-t", name, ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") if err != nil { - t.Fatal("failed to build the image", out) + c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { - t.Fatalf("--rm shouldn't have left containers behind") + c.Fatalf("--rm shouldn't have left containers behind") } deleteImages(name) } @@ -2139,32 +1984,30 @@ func TestBuildRm(t *testing.T) { { containerCountBefore, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "--rm=false", "-t", name, ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") if err != nil { - t.Fatal("failed to build the image", out) + c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { - t.Fatalf("failed to get the container count: %s", err) + c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore == containerCountAfter { - t.Fatalf("--rm=false should have left containers behind") + c.Fatalf("--rm=false should have left containers behind") } - deleteAllContainers() deleteImages(name) } - logDone("build - ensure --rm doesn't leave containers behind and that --rm=true is the default") } -func TestBuildWithVolumes(t *testing.T) { +func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { var ( result map[string]map[string]struct{} name = "testbuildvolumes" @@ -2180,7 +2023,6 @@ func TestBuildWithVolumes(t *testing.T) { "/test8]": emptyMap, } ) - defer deleteImages(name) _, err := buildImage(name, `FROM scratch VOLUME /test1 @@ -2191,52 +2033,48 @@ func TestBuildWithVolumes(t *testing.T) { `, true) if 
err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Volumes") if err != nil { - t.Fatal(err) + c.Fatal(err) } err = unmarshalJSON([]byte(res), &result) if err != nil { - t.Fatal(err) + c.Fatal(err) } equal := reflect.DeepEqual(&result, &expected) if !equal { - t.Fatalf("Volumes %s, expected %s", result, expected) + c.Fatalf("Volumes %s, expected %s", result, expected) } - logDone("build - with volumes") } -func TestBuildMaintainer(t *testing.T) { +func (s *DockerSuite) TestBuildMaintainer(c *check.C) { name := "testbuildmaintainer" expected := "dockerio" - defer deleteImages(name) _, err := buildImage(name, `FROM scratch MAINTAINER dockerio`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Maintainer %s, expected %s", res, expected) + c.Fatalf("Maintainer %s, expected %s", res, expected) } - logDone("build - maintainer") } -func TestBuildUser(t *testing.T) { +func (s *DockerSuite) TestBuildUser(c *check.C) { name := "testbuilduser" expected := "dockerio" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd @@ -2244,22 +2082,20 @@ func TestBuildUser(t *testing.T) { RUN [ $(whoami) = 'dockerio' ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.User") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("User %s, expected %s", res, expected) + c.Fatalf("User %s, expected %s", res, expected) } - logDone("build - user") } -func TestBuildRelativeWorkdir(t *testing.T) { +func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { name := "testbuildrelativeworkdir" expected := "/test2/test3" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN [ "$PWD" = '/' ] @@ -2271,22 +2107,20 @@ func TestBuildRelativeWorkdir(t *testing.T) { RUN [ "$PWD" 
= '/test2/test3' ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Workdir %s, expected %s", res, expected) + c.Fatalf("Workdir %s, expected %s", res, expected) } - logDone("build - relative workdir") } -func TestBuildWorkdirWithEnvVariables(t *testing.T) { +func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { name := "testbuildworkdirwithenvvariables" expected := "/test1/test2" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENV DIRPATH /test1 @@ -2295,21 +2129,19 @@ func TestBuildWorkdirWithEnvVariables(t *testing.T) { WORKDIR $SUBDIRNAME/$MISSING_VAR`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.WorkingDir") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Workdir %s, expected %s", res, expected) + c.Fatalf("Workdir %s, expected %s", res, expected) } - logDone("build - workdir with env variables") } -func TestBuildRelativeCopy(t *testing.T) { +func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { name := "testbuildrelativecopy" - defer deleteImages(name) dockerfile := ` FROM busybox WORKDIR /test1 @@ -2338,19 +2170,17 @@ func TestBuildRelativeCopy(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImageFromContext(name, ctx, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - relative copy/add") } -func TestBuildEnv(t *testing.T) { +func (s *DockerSuite) TestBuildEnv(c *check.C) { name := "testbuildenv" expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENV PATH /test:$PATH @@ -2358,116 +2188,106 @@ func TestBuildEnv(t *testing.T) { RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) if err != nil { - t.Fatal(err) + 
c.Fatal(err) } res, err := inspectField(name, "Config.Env") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Env %s, expected %s", res, expected) + c.Fatalf("Env %s, expected %s", res, expected) } - logDone("build - env") } -func TestBuildContextCleanup(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { + testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" - defer deleteImages(name) entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { - t.Fatalf("failed to list contents of tmp dir: %s", err) + c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { - t.Fatalf("failed to list contents of tmp dir: %s", err) + c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { - t.Fatalf("context should have been deleted, but wasn't") + c.Fatalf("context should have been deleted, but wasn't") } - logDone("build - verify context cleanup works properly") } -func TestBuildContextCleanupFailedBuild(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { + testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" - defer deleteImages(name) - defer deleteAllContainers() entries, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { - t.Fatalf("failed to list contents of tmp dir: %s", err) + c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch RUN /non/existing/command`, true) if err == nil { - t.Fatalf("expected build to fail, but it didn't") + c.Fatalf("expected build to fail, but it didn't") } entriesFinal, err := ioutil.ReadDir("/var/lib/docker/tmp") if err != nil { - 
t.Fatalf("failed to list contents of tmp dir: %s", err) + c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { - t.Fatalf("context should have been deleted, but wasn't") + c.Fatalf("context should have been deleted, but wasn't") } - logDone("build - verify context cleanup works properly after an unsuccessful build") } -func TestBuildCmd(t *testing.T) { +func (s *DockerSuite) TestBuildCmd(c *check.C) { name := "testbuildcmd" - expected := "[/bin/echo Hello World]" - defer deleteImages(name) + expected := "{[/bin/echo Hello World]}" _, err := buildImage(name, `FROM scratch CMD ["/bin/echo", "Hello World"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Cmd %s, expected %s", res, expected) + c.Fatalf("Cmd %s, expected %s", res, expected) } - logDone("build - cmd") } -func TestBuildExpose(t *testing.T) { +func (s *DockerSuite) TestBuildExpose(c *check.C) { name := "testbuildexpose" - expected := "map[2375/tcp:map[]]" - defer deleteImages(name) + expected := "map[2375/tcp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Exposed ports %s, expected %s", res, expected) + c.Fatalf("Exposed ports %s, expected %s", res, expected) } - logDone("build - expose") } -func TestBuildExposeMorePorts(t *testing.T) { +func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { // start building docker file with a large number of ports portList := make([]string, 50) line := make([]string, 100) @@ -2493,127 +2313,118 @@ func TestBuildExposeMorePorts(t *testing.T) { tmpl.Execute(buf, portList) name := "testbuildexpose" - defer deleteImages(name) _, err := buildImage(name, buf.String(), 
true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // check if all the ports are saved inside Config.ExposedPorts res, err := inspectFieldJSON(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { - t.Fatal(err) + c.Fatal(err) } for _, p := range expectedPorts { ep := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[ep]; !ok { - t.Errorf("Port(%s) is not exposed", ep) + c.Errorf("Port(%s) is not exposed", ep) } else { delete(exposedPorts, ep) } } if len(exposedPorts) != 0 { - t.Errorf("Unexpected extra exposed ports %v", exposedPorts) + c.Errorf("Unexpected extra exposed ports %v", exposedPorts) } - logDone("build - expose large number of ports") } -func TestBuildExposeOrder(t *testing.T) { +func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { buildID := func(name, exposed string) string { _, err := buildImage(name, fmt.Sprintf(`FROM scratch EXPOSE %s`, exposed), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id, err := inspectField(name, "Id") if err != nil { - t.Fatal(err) + c.Fatal(err) } return id } id1 := buildID("testbuildexpose1", "80 2375") id2 := buildID("testbuildexpose2", "2375 80") - defer deleteImages("testbuildexpose1", "testbuildexpose2") if id1 != id2 { - t.Errorf("EXPOSE should invalidate the cache only when ports actually changed") + c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") } - logDone("build - expose order") } -func TestBuildExposeUpperCaseProto(t *testing.T) { +func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { name := "testbuildexposeuppercaseproto" - expected := "map[5678/udp:map[]]" - defer deleteImages(name) + expected := "map[5678/udp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 5678/UDP`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + 
c.Fatal(err) } if res != expected { - t.Fatalf("Exposed ports %s, expected %s", res, expected) + c.Fatalf("Exposed ports %s, expected %s", res, expected) } - logDone("build - expose port with upper case proto") } -func TestBuildExposeHostPort(t *testing.T) { +func (s *DockerSuite) TestBuildExposeHostPort(c *check.C) { // start building docker file with ip:hostPort:containerPort name := "testbuildexpose" - expected := "map[5678/tcp:map[]]" - defer deleteImages(name) + expected := "map[5678/tcp:{}]" _, out, err := buildImageWithOut(name, `FROM scratch EXPOSE 192.168.1.2:2375:5678`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "to map host ports to container ports (ip:hostPort:containerPort) is deprecated.") { - t.Fatal("Missing warning message") + c.Fatal("Missing warning message") } res, err := inspectField(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Exposed ports %s, expected %s", res, expected) + c.Fatalf("Exposed ports %s, expected %s", res, expected) } - logDone("build - ignore exposing host's port") } -func TestBuildEmptyEntrypointInheritance(t *testing.T) { +func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { name := "testbuildentrypointinheritance" name2 := "testbuildentrypointinheritance2" - defer deleteImages(name, name2) _, err := buildImage(name, `FROM busybox ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } - expected := "[/bin/echo]" + expected := "{[/bin/echo]}" if res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + c.Fatalf("Entrypoint %s, expected %s", res, expected) } _, err = buildImage(name2, @@ -2621,69 +2432,64 @@ func TestBuildEmptyEntrypointInheritance(t *testing.T) { ENTRYPOINT []`, name), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err = 
inspectField(name2, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } - expected = "[]" + expected = "{[]}" if res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + c.Fatalf("Entrypoint %s, expected %s", res, expected) } - logDone("build - empty entrypoint inheritance") } -func TestBuildEmptyEntrypoint(t *testing.T) { +func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { name := "testbuildentrypoint" - defer deleteImages(name) - expected := "[]" + expected := "{[]}" _, err := buildImage(name, `FROM busybox ENTRYPOINT []`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + c.Fatalf("Entrypoint %s, expected %s", res, expected) } - logDone("build - empty entrypoint") } -func TestBuildEntrypoint(t *testing.T) { +func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { name := "testbuildentrypoint" - expected := "[/bin/echo]" - defer deleteImages(name) + expected := "{[/bin/echo]}" _, err := buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + c.Fatalf("Entrypoint %s, expected %s", res, expected) } - logDone("build - entrypoint") } // #6445 ensure ONBUILD triggers aren't committed to grandchildren -func TestBuildOnBuildLimitedInheritence(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { var ( out2, out3 string ) @@ -2696,15 +2502,14 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { ` ctx, err := fakeContext(dockerfile1, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() - out1, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", name1, ".") + 
out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") if err != nil { - t.Fatalf("build failed to complete: %s, %v", out1, err) + c.Fatalf("build failed to complete: %s, %v", out1, err) } - defer deleteImages(name1) } { name2 := "testonbuildtrigger2" @@ -2713,15 +2518,14 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { ` ctx, err := fakeContext(dockerfile2, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() - out2, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name2, ".") + out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") if err != nil { - t.Fatalf("build failed to complete: %s, %v", out2, err) + c.Fatalf("build failed to complete: %s, %v", out2, err) } - defer deleteImages(name2) } { name3 := "testonbuildtrigger3" @@ -2730,34 +2534,31 @@ func TestBuildOnBuildLimitedInheritence(t *testing.T) { ` ctx, err := fakeContext(dockerfile3, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() - out3, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-t", name3, ".") + out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") if err != nil { - t.Fatalf("build failed to complete: %s, %v", out3, err) + c.Fatalf("build failed to complete: %s, %v", out3, err) } - defer deleteImages(name3) } // ONBUILD should be run in second build. if !strings.Contains(out2, "ONBUILD PARENT") { - t.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") + c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") } // ONBUILD should *not* be run in third build. 
if strings.Contains(out3, "ONBUILD PARENT") { - t.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") + c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") } - logDone("build - onbuild") } -func TestBuildWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildWithCache(c *check.C) { name := "testbuildwithcache" - defer deleteImages(name) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio @@ -2765,7 +2566,7 @@ func TestBuildWithCache(t *testing.T) { ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImage(name, `FROM scratch @@ -2774,18 +2575,16 @@ func TestBuildWithCache(t *testing.T) { ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - with cache") } -func TestBuildWithoutCache(t *testing.T) { +func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { name := "testbuildwithoutcache" name2 := "testbuildwithoutcache2" - defer deleteImages(name, name2) id1, err := buildImage(name, `FROM scratch MAINTAINER dockerio @@ -2793,7 +2592,7 @@ func TestBuildWithoutCache(t *testing.T) { ENTRYPOINT ["/bin/echo"]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImage(name2, @@ -2803,18 +2602,15 @@ func TestBuildWithoutCache(t *testing.T) { ENTRYPOINT ["/bin/echo"]`, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } - logDone("build - without cache") } -func TestBuildConditionalCache(t *testing.T) { +func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { name := "testbuildconditionalcache" - name2 := "testbuildconditionalcache2" - defer deleteImages(name, name2) dockerfile := ` FROM busybox @@ -2823,42 +2619,39 @@ func 
TestBuildConditionalCache(t *testing.T) { "foo": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatalf("Error building #1: %s", err) + c.Fatalf("Error building #1: %s", err) } if err := ctx.Add("foo", "bye"); err != nil { - t.Fatalf("Error modifying foo: %s", err) + c.Fatalf("Error modifying foo: %s", err) } id2, err := buildImageFromContext(name, ctx, false) if err != nil { - t.Fatalf("Error building #2: %s", err) + c.Fatalf("Error building #2: %s", err) } if id2 == id1 { - t.Fatal("Should not have used the cache") + c.Fatal("Should not have used the cache") } id3, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatalf("Error building #3: %s", err) + c.Fatalf("Error building #3: %s", err) } if id3 != id2 { - t.Fatal("Should have used the cache") + c.Fatal("Should have used the cache") } - - logDone("build - conditional cache") } -func TestBuildADDLocalFileWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDLocalFileWithCache(c *check.C) { name := "testbuildaddlocalfilewithcache" name2 := "testbuildaddlocalfilewithcache2" - defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2869,26 +2662,24 @@ func TestBuildADDLocalFileWithCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - add local file with cache") } -func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDMultipleLocalFileWithCache(c *check.C) { name := "testbuildaddmultiplelocalfilewithcache" name2 := 
"testbuildaddmultiplelocalfilewithcache2" - defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2899,26 +2690,24 @@ func TestBuildADDMultipleLocalFileWithCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - add multiple local files with cache") } -func TestBuildADDLocalFileWithoutCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDLocalFileWithoutCache(c *check.C) { name := "testbuildaddlocalfilewithoutcache" name2 := "testbuildaddlocalfilewithoutcache2" - defer deleteImages(name, name2) dockerfile := ` FROM busybox MAINTAINER dockerio @@ -2929,26 +2718,24 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } - logDone("build - add local file without cache") } -func TestBuildCopyDirButNotFile(t *testing.T) { +func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { name := "testbuildcopydirbutnotfile" name2 := "testbuildcopydirbutnotfile2" - defer deleteImages(name, name2) dockerfile := ` FROM scratch COPY dir /tmp/` @@ -2957,33 +2744,31 @@ func TestBuildCopyDirButNotFile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - 
t.Fatal(err) + c.Fatal(err) } // Check that adding file with similar name doesn't mess with cache if err := ctx.Add("dir_file", "hello2"); err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but wasn't") + c.Fatal("The cache should have been used but wasn't") } - logDone("build - add current directory but not file") } -func TestBuildADDCurrentDirWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDCurrentDirWithCache(c *check.C) { name := "testbuildaddcurrentdirwithcache" name2 := name + "2" name3 := name + "3" name4 := name + "4" name5 := name + "5" - defer deleteImages(name, name2, name3, name4, name5) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -2993,60 +2778,58 @@ func TestBuildADDCurrentDirWithCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Check that adding file invalidate cache of "ADD ." if err := ctx.Add("bar", "hello2"); err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file invalidate cache of "ADD ." if err := ctx.Add("foo", "hello1"); err != nil { - t.Fatal(err) + c.Fatal(err) } id3, err := buildImageFromContext(name3, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id2 == id3 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } // Check that changing file to same content invalidate cache of "ADD ." 
time.Sleep(1 * time.Second) // wait second because of mtime precision if err := ctx.Add("foo", "hello1"); err != nil { - t.Fatal(err) + c.Fatal(err) } id4, err := buildImageFromContext(name4, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id3 == id4 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } id5, err := buildImageFromContext(name5, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id4 != id5 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - add current directory with cache") } -func TestBuildADDCurrentDirWithoutCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDCurrentDirWithoutCache(c *check.C) { name := "testbuildaddcurrentdirwithoutcache" name2 := "testbuildaddcurrentdirwithoutcache2" - defer deleteImages(name, name2) dockerfile := ` FROM scratch MAINTAINER dockerio @@ -3056,30 +2839,28 @@ func TestBuildADDCurrentDirWithoutCache(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } - logDone("build - add current directory without cache") } -func TestBuildADDRemoteFileWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDRemoteFileWithCache(c *check.C) { name := "testbuildaddremotefilewithcache" - defer deleteImages(name) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -3089,7 +2870,7 @@ func TestBuildADDRemoteFileWithCache(t *testing.T) { ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != 
nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImage(name, fmt.Sprintf(`FROM scratch @@ -3097,23 +2878,21 @@ func TestBuildADDRemoteFileWithCache(t *testing.T) { ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - add remote file with cache") } -func TestBuildADDRemoteFileWithoutCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDRemoteFileWithoutCache(c *check.C) { name := "testbuildaddremotefilewithoutcache" name2 := "testbuildaddremotefilewithoutcache2" - defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -3123,7 +2902,7 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { ADD %s/baz /usr/lib/baz/quux`, server.URL()), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImage(name2, fmt.Sprintf(`FROM scratch @@ -3131,26 +2910,23 @@ func TestBuildADDRemoteFileWithoutCache(t *testing.T) { ADD %s/baz /usr/lib/baz/quux`, server.URL()), false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } - logDone("build - add remote file without cache") } -func TestBuildADDRemoteFileMTime(t *testing.T) { +func (s *DockerSuite) TestBuildADDRemoteFileMTime(c *check.C) { name := "testbuildaddremotefilemtime" name2 := name + "2" name3 := name + "3" name4 := name + "4" - defer deleteImages(name, name2, name3, name4) - files := map[string]string{"baz": "hello"} server, err := fakeStorage(files) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -3158,21 +2934,21 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server.URL()), 
nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but wasn't - #1") + c.Fatal("The cache should have been used but wasn't - #1") } // Now create a different server withsame contents (causes different mtim) @@ -3183,7 +2959,7 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { server2, err := fakeStorage(files) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server2.Close() @@ -3191,36 +2967,34 @@ func TestBuildADDRemoteFileMTime(t *testing.T) { MAINTAINER dockerio ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx2.Close() id3, err := buildImageFromContext(name3, ctx2, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id3 { - t.Fatal("The cache should not have been used but was") + c.Fatal("The cache should not have been used but was") } // And for good measure do it again and make sure cache is used this time id4, err := buildImageFromContext(name4, ctx2, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id3 != id4 { - t.Fatal("The cache should have been used but wasn't - #2") + c.Fatal("The cache should have been used but wasn't - #2") } - logDone("build - add remote file testing mtime") } -func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDLocalAndRemoteFilesWithCache(c *check.C) { name := "testbuildaddlocalandremotefilewithcache" - defer deleteImages(name) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -3232,24 +3006,23 @@ func TestBuildADDLocalAndRemoteFilesWithCache(t *testing.T) { "foo": "hello world", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer 
ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 != id2 { - t.Fatal("The cache should have been used but hasn't.") + c.Fatal("The cache should have been used but hasn't.") } - logDone("build - add local and remote file with cache") } -func testContextTar(t *testing.T, compression archive.Compression) { +func testContextTar(c *check.C, compression archive.Compression) { ctx, err := fakeContext( `FROM busybox ADD foo /foo @@ -3260,58 +3033,51 @@ CMD ["cat", "/foo"]`, ) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } context, err := archive.Tar(ctx.Dir, compression) if err != nil { - t.Fatalf("failed to build context tar: %v", err) + c.Fatalf("failed to build context tar: %v", err) } name := "contexttar" buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") - defer deleteImages(name) buildCmd.Stdin = context if out, _, err := runCommandWithOutput(buildCmd); err != nil { - t.Fatalf("build failed to complete: %v %v", out, err) + c.Fatalf("build failed to complete: %v %v", out, err) } } -func TestBuildContextTarGzip(t *testing.T) { - testContextTar(t, archive.Gzip) - logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", archive.Gzip)) +func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { + testContextTar(c, archive.Gzip) } -func TestBuildContextTarNoCompression(t *testing.T) { - testContextTar(t, archive.Uncompressed) - logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", archive.Uncompressed)) +func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { + testContextTar(c, archive.Uncompressed) } -func TestBuildNoContext(t *testing.T) { +func (s *DockerSuite) TestBuildNoContext(c *check.C) { buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") buildCmd.Stdin = strings.NewReader("FROM 
busybox\nCMD echo ok\n") if out, _, err := runCommandWithOutput(buildCmd); err != nil { - t.Fatalf("build failed to complete: %v %v", out, err) + c.Fatalf("build failed to complete: %v %v", out, err) } - if out, _, err := dockerCmd(t, "run", "--rm", "nocontext"); out != "ok\n" || err != nil { - t.Fatalf("run produced invalid output: %q, expected %q", out, "ok") + if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { + c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") } - - deleteImages("nocontext") - logDone("build - build an image with no context") } // TODO: TestCaching -func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { +func (s *DockerSuite) TestBuildADDLocalAndRemoteFilesWithoutCache(c *check.C) { name := "testbuildaddlocalandremotefilewithoutcache" name2 := "testbuildaddlocalandremotefilewithoutcache2" - defer deleteImages(name, name2) server, err := fakeStorage(map[string]string{ "baz": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -3323,26 +3089,24 @@ func TestBuildADDLocalAndRemoteFilesWithoutCache(t *testing.T) { "foo": "hello world", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } id2, err := buildImageFromContext(name2, ctx, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("The cache should have been invalided but hasn't.") + c.Fatal("The cache should have been invalided but hasn't.") } - logDone("build - add local and remote file without cache") } -func TestBuildWithVolumeOwnership(t *testing.T) { +func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { name := "testbuildimg" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox:latest @@ -3351,36 +3115,34 @@ func TestBuildWithVolumeOwnership(t *testing.T) { true) if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, 
"run", "--rm", "testbuildimg", "ls", "-la", "/test") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if expected := "drw-------"; !strings.Contains(out, expected) { - t.Fatalf("expected %s received %s", expected, out) + c.Fatalf("expected %s received %s", expected, out) } if expected := "daemon daemon"; !strings.Contains(out, expected) { - t.Fatalf("expected %s received %s", expected, out) + c.Fatalf("expected %s received %s", expected, out) } - logDone("build - volume ownership") } // testing #1405 - config.Cmd does not get cleaned up if // utilizing cache -func TestBuildEntrypointRunCleanup(t *testing.T) { +func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { name := "testbuildcmdcleanup" - defer deleteImages(name) if _, err := buildImage(name, `FROM busybox RUN echo "hello"`, true); err != nil { - t.Fatal(err) + c.Fatal(err) } ctx, err := fakeContext(`FROM busybox @@ -3392,25 +3154,23 @@ func TestBuildEntrypointRunCleanup(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) } // Cmd must be cleaned up - if expected := ""; res != expected { - t.Fatalf("Cmd %s, expected %s", res, expected) + if res != "" { + c.Fatalf("Cmd %s, expected nil", res) } - logDone("build - cleanup cmd after RUN") } -func TestBuildForbiddenContextPath(t *testing.T) { +func (s *DockerSuite) TestBuildForbiddenContextPath(c *check.C) { name := "testbuildforbidpath" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD ../../ test/ `, @@ -3420,51 +3180,47 @@ func TestBuildForbiddenContextPath(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "Forbidden path outside the build context: ../../ " if _, err := buildImageFromContext(name, ctx, true); err == 
nil || !strings.Contains(err.Error(), expected) { - t.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) + c.Fatalf("Wrong error: (should contain \"%s\") got:\n%v", expected, err) } - logDone("build - forbidden context path") } -func TestBuildADDFileNotFound(t *testing.T) { +func (s *DockerSuite) TestBuildADDFileNotFound(c *check.C) { name := "testbuildaddnotfound" - defer deleteImages(name) ctx, err := fakeContext(`FROM scratch ADD foo /usr/local/bar`, map[string]string{"bar": "hello"}) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { if !strings.Contains(err.Error(), "foo: no such file or directory") { - t.Fatalf("Wrong error %v, must be about missing foo file or directory", err) + c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - add file not found") } -func TestBuildInheritance(t *testing.T) { +func (s *DockerSuite) TestBuildInheritance(c *check.C) { name := "testbuildinheritance" - defer deleteImages(name) _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } ports1, err := inspectField(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name, @@ -3472,133 +3228,118 @@ func TestBuildInheritance(t *testing.T) { ENTRYPOINT ["/bin/echo"]`, name), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if expected := "[/bin/echo]"; res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + if expected := "{[/bin/echo]}"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) } ports2, err := inspectField(name, "Config.ExposedPorts") if err != nil { - t.Fatal(err) + c.Fatal(err) } if ports1 != ports2 { - 
t.Fatalf("Ports must be same: %s != %s", ports1, ports2) + c.Fatalf("Ports must be same: %s != %s", ports1, ports2) } - logDone("build - inheritance") } -func TestBuildFails(t *testing.T) { +func (s *DockerSuite) TestBuildFails(c *check.C) { name := "testbuildfails" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, true) if err != nil { if !strings.Contains(err.Error(), "returned a non-zero code: 23") { - t.Fatalf("Wrong error %v, must be about non-zero code 23", err) + c.Fatalf("Wrong error %v, must be about non-zero code 23", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - unsuccessful") } -func TestBuildFailsDockerfileEmpty(t *testing.T) { +func (s *DockerSuite) TestBuildFailsDockerfileEmpty(c *check.C) { name := "testbuildfails" - defer deleteImages(name) _, err := buildImage(name, ``, true) if err != nil { if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { - t.Fatalf("Wrong error %v, must be about empty Dockerfile", err) + c.Fatalf("Wrong error %v, must be about empty Dockerfile", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - unsuccessful with empty dockerfile") } -func TestBuildOnBuild(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuild(c *check.C) { name := "testbuildonbuild" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD RUN touch foobar`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s RUN [ -f foobar ]`, name), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - onbuild") } -func TestBuildOnBuildForbiddenChained(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildForbiddenChained(c *check.C) { name := "testbuildonbuildforbiddenchained" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD ONBUILD RUN touch 
foobar`, true) if err != nil { if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { - t.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) + c.Fatalf("Wrong error %v, must be about chaining ONBUILD", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden chained") } -func TestBuildOnBuildForbiddenFrom(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildForbiddenFrom(c *check.C) { name := "testbuildonbuildforbiddenfrom" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD FROM scratch`, true) if err != nil { if !strings.Contains(err.Error(), "FROM isn't allowed as an ONBUILD trigger") { - t.Fatalf("Wrong error %v, must be about FROM forbidden", err) + c.Fatalf("Wrong error %v, must be about FROM forbidden", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden from") } -func TestBuildOnBuildForbiddenMaintainer(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainer(c *check.C) { name := "testbuildonbuildforbiddenmaintainer" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ONBUILD MAINTAINER docker.io`, true) if err != nil { if !strings.Contains(err.Error(), "MAINTAINER isn't allowed as an ONBUILD trigger") { - t.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) + c.Fatalf("Wrong error %v, must be about MAINTAINER forbidden", err) } } else { - t.Fatal("Error must not be nil") + c.Fatal("Error must not be nil") } - logDone("build - onbuild forbidden maintainer") } // gh #2446 -func TestBuildAddToSymlinkDest(t *testing.T) { +func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { name := "testbuildaddtosymlinkdest" - defer deleteImages(name) ctx, err := fakeContext(`FROM busybox RUN mkdir /foo RUN ln -s /foo /bar @@ -3609,18 +3350,16 @@ func TestBuildAddToSymlinkDest(t *testing.T) { "foo": 
"hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add to symlink destination") } -func TestBuildEscapeWhitespace(t *testing.T) { +func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { name := "testbuildescaping" - defer deleteImages(name) _, err := buildImage(name, ` FROM busybox @@ -3632,20 +3371,18 @@ docker.com>" res, err := inspectField(name, "Author") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != "\"Docker IO \"" { - t.Fatalf("Parsed string did not match the escaped string. Got: %q", res) + c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) } - logDone("build - validate escaping whitespace") } -func TestBuildVerifyIntString(t *testing.T) { +func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { // Verify that strings that look like ints are still passed as strings name := "testbuildstringing" - defer deleteImages(name) _, err := buildImage(name, ` FROM busybox @@ -3654,19 +3391,17 @@ func TestBuildVerifyIntString(t *testing.T) { out, rc, err := runCommandWithOutput(exec.Command(dockerBinary, "inspect", name)) if rc != 0 || err != nil { - t.Fatalf("Unexcepted error from inspect: rc: %v err: %v", rc, err) + c.Fatalf("Unexpected error from inspect: rc: %v err: %v", rc, err) } if !strings.Contains(out, "\"123\"") { - t.Fatalf("Output does not contain the int as a string:\n%s", out) + c.Fatalf("Output does not contain the int as a string:\n%s", out) } - logDone("build - verify int/strings as strings") } -func TestBuildDockerignore(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignore(c *check.C) { name := "testbuilddockerignore" - defer deleteImages(name) dockerfile := ` FROM busybox ADD . /bla @@ -3675,29 +3410,36 @@ func TestBuildDockerignore(t *testing.T) { RUN [[ ! -e /bla/src/_vendor ]] RUN [[ ! -e /bla/.gitignore ]] RUN [[ ! -e /bla/README.md ]] + RUN [[ ! 
-e /bla/dir/foo ]] + RUN [[ ! -e /bla/foo ]] RUN [[ ! -e /bla/.git ]]` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", + "dir/foo": "", ".gitignore": "", "README.md": "readme", - ".dockerignore": ".git\npkg\n.gitignore\nsrc/_vendor\n*.md", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir`, }) - defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } + defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - test .dockerignore") } -func TestBuildDockerignoreCleanPaths(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { name := "testbuilddockerignorecleanpaths" - defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ @@ -3709,19 +3451,66 @@ func TestBuildDockerignoreCleanPaths(t *testing.T) { ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - test .dockerignore with clean paths") } -func TestBuildDockerignoringDockerfile(t *testing.T) { - name := "testbuilddockerignoredockerfile" +func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { + name := "testbuilddockerignoreexceptions" defer deleteImages(name) dockerfile := ` + FROM busybox + ADD . /bla + RUN [[ -f /bla/src/x.go ]] + RUN [[ -f /bla/Makefile ]] + RUN [[ ! -e /bla/src/_vendor ]] + RUN [[ ! -e /bla/.gitignore ]] + RUN [[ ! -e /bla/README.md ]] + RUN [[ -e /bla/dir/dir/foo ]] + RUN [[ ! -e /bla/dir/foo1 ]] + RUN [[ -f /bla/dir/e ]] + RUN [[ -f /bla/dir/e-dir/foo ]] + RUN [[ ! -e /bla/foo ]] + RUN [[ ! 
-e /bla/.git ]]` + ctx, err := fakeContext(dockerfile, map[string]string{ + "Makefile": "all:", + ".git/HEAD": "ref: foo", + "src/x.go": "package main", + "src/_vendor/v.go": "package main", + "dir/foo": "", + "dir/foo1": "", + "dir/dir/f1": "", + "dir/dir/foo": "", + "dir/e": "", + "dir/e-dir/foo": "", + ".gitignore": "", + "README.md": "readme", + ".dockerignore": ` +.git +pkg +.gitignore +src/_vendor +*.md +dir +!dir/e* +!dir/dir/foo`, + }) + if err != nil { + c.Fatal(err) + } + defer ctx.Close() + if _, err := buildImageFromContext(name, ctx, true); err != nil { + c.Fatal(err) + } +} + +func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { + name := "testbuilddockerignoredockerfile" + dockerfile := ` FROM busybox ADD . /tmp/ RUN ! ls /tmp/Dockerfile @@ -3731,26 +3520,24 @@ func TestBuildDockerignoringDockerfile(t *testing.T) { ".dockerignore": "Dockerfile\n", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't ignore Dockerfile correctly:%s", err) + c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) } // now try it with ./Dockerfile ctx.Add(".dockerignore", "./Dockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) + c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) } - logDone("build - test .dockerignore of Dockerfile") } -func TestBuildDockerignoringRenamedDockerfile(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { name := "testbuilddockerignoredockerfile" - defer deleteImages(name) dockerfile := ` FROM busybox ADD . 
/tmp/ @@ -3763,26 +3550,24 @@ func TestBuildDockerignoringRenamedDockerfile(t *testing.T) { ".dockerignore": "MyDockerfile\n", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) + c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) } // now try it with ./MyDockerfile ctx.Add(".dockerignore", "./MyDockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) + c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) } - logDone("build - test .dockerignore of renamed Dockerfile") } -func TestBuildDockerignoringDockerignore(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { name := "testbuilddockerignoredockerignore" - defer deleteImages(name) dockerfile := ` FROM busybox ADD . /tmp/ @@ -3794,20 +3579,18 @@ func TestBuildDockerignoringDockerignore(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't ignore .dockerignore correctly:%s", err) + c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) } - logDone("build - test .dockerignore of .dockerignore") } -func TestBuildDockerignoreTouchDockerfile(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { var id1 string var id2 string name := "testbuilddockerignoretouchdockerfile" - defer deleteImages(name) dockerfile := ` FROM busybox ADD . 
/tmp/` @@ -3817,48 +3600,46 @@ func TestBuildDockerignoreTouchDockerfile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if id1, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't build it correctly:%s", err) + c.Fatalf("Didn't build it correctly:%s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't build it correctly:%s", err) + c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { - t.Fatalf("Didn't use the cache - 1") + c.Fatalf("Didn't use the cache - 1") } // Now make sure touching Dockerfile doesn't invalidate the cache if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { - t.Fatalf("Didn't add Dockerfile: %s", err) + c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't build it correctly:%s", err) + c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { - t.Fatalf("Didn't use the cache - 2") + c.Fatalf("Didn't use the cache - 2") } // One more time but just 'touch' it instead of changing the content if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { - t.Fatalf("Didn't add Dockerfile: %s", err) + c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("Didn't build it correctly:%s", err) + c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { - t.Fatalf("Didn't use the cache - 3") + c.Fatalf("Didn't use the cache - 3") } - logDone("build - test .dockerignore touch dockerfile") } -func TestBuildDockerignoringWholeDir(t *testing.T) { +func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { name := "testbuilddockerignorewholedir" - defer deleteImages(name) dockerfile := ` FROM busybox COPY . 
/ @@ -3867,21 +3648,20 @@ func TestBuildDockerignoringWholeDir(t *testing.T) { ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", + ".gitignore": "", ".dockerignore": ".*\n", }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err = buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - test .dockerignore whole dir with .*") } -func TestBuildLineBreak(t *testing.T) { +func (s *DockerSuite) TestBuildLineBreak(c *check.C) { name := "testbuildlinebreak" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass \ @@ -3891,14 +3671,12 @@ RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - line break with \\") } -func TestBuildEOLInLine(t *testing.T) { +func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { name := "testbuildeolinline" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass > /tmp/passwd' @@ -3908,14 +3686,12 @@ RUN [ "$(cat /tmp/passwd)" = "root:testpass" ] RUN [ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - end of line in dockerfile instruction") } -func TestBuildCommentsShebangs(t *testing.T) { +func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { name := "testbuildcomments" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox # This is an ordinary comment. 
@@ -3928,14 +3704,12 @@ RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] RUN [ "$(/hello.sh)" = "hello world" ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - comments and shebangs") } -func TestBuildUsersAndGroups(t *testing.T) { +func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { name := "testbuildusers" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox @@ -3992,14 +3766,12 @@ USER 1042:1043 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - users and groups") } -func TestBuildEnvUsage(t *testing.T) { +func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { name := "testbuildenvusage" - defer deleteImages(name) dockerfile := `FROM busybox ENV HOME /root ENV PATH $HOME/bin:$PATH @@ -4023,20 +3795,18 @@ RUN [ "$ghi" = "def" ] "hello/docker/world": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - environment variables usage") } -func TestBuildEnvUsage2(t *testing.T) { +func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { name := "testbuildenvusage2" - defer deleteImages(name) dockerfile := `FROM busybox ENV abc=def RUN [ "$abc" = "def" ] @@ -4127,20 +3897,18 @@ RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] "hello/docker/world": "hello", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - environment variables usage2") } -func TestBuildAddScript(t *testing.T) { +func (s *DockerSuite) TestBuildAddScript(c *check.C) { name := "testbuildaddscript" - defer deleteImages(name) dockerfile := ` FROM busybox ADD test /test @@ -4151,20 +3919,18 @@ RUN [ "$(cat /testfile)" = 'test!' 
]` "test": "#!/bin/sh\necho 'test!' > /testfile", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - add and run script") } -func TestBuildAddTar(t *testing.T) { +func (s *DockerSuite) TestBuildAddTar(c *check.C) { name := "testbuildaddtar" - defer deleteImages(name) ctx := func() *FakeContext { dockerfile := ` @@ -4185,7 +3951,7 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { - t.Fatalf("failed to create test.tar archive: %v", err) + c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() @@ -4195,32 +3961,30 @@ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` Name: "test/foo", Size: 2, }); err != nil { - t.Fatalf("failed to write tar file header: %v", err) + c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { - t.Fatalf("failed to write tar file content: %v", err) + c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { - t.Fatalf("failed to close tar archive: %v", err) + c.Fatalf("failed to close tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - t.Fatalf("failed to open destination dockerfile: %v", err) + c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("build failed to complete for TestBuildAddTar: %v", err) + c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) } - logDone("build - ADD tar") } -func TestBuildAddTarXz(t *testing.T) { +func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { name := "testbuildaddtarxz" - defer 
deleteImages(name) ctx := func() *FakeContext { dockerfile := ` @@ -4230,7 +3994,7 @@ func TestBuildAddTarXz(t *testing.T) { tmpDir, err := ioutil.TempDir("", "fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { - t.Fatalf("failed to create test.tar archive: %v", err) + c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() @@ -4240,23 +4004,23 @@ func TestBuildAddTarXz(t *testing.T) { Name: "test/foo", Size: 2, }); err != nil { - t.Fatalf("failed to write tar file header: %v", err) + c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { - t.Fatalf("failed to write tar file content: %v", err) + c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { - t.Fatalf("failed to close tar archive: %v", err) + c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - t.Fatalf("failed to open destination dockerfile: %v", err) + c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() @@ -4264,15 +4028,13 @@ func TestBuildAddTarXz(t *testing.T) { defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } - logDone("build - ADD tar.xz") } -func TestBuildAddTarXzGz(t *testing.T) { +func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { name := "testbuildaddtarxzgz" - defer deleteImages(name) ctx := func() *FakeContext { dockerfile := ` @@ -4282,7 +4044,7 @@ func TestBuildAddTarXzGz(t *testing.T) { tmpDir, err := ioutil.TempDir("", 
"fake-context") testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { - t.Fatalf("failed to create test.tar archive: %v", err) + c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() @@ -4292,31 +4054,31 @@ func TestBuildAddTarXzGz(t *testing.T) { Name: "test/foo", Size: 2, }); err != nil { - t.Fatalf("failed to write tar file header: %v", err) + c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { - t.Fatalf("failed to write tar file content: %v", err) + c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { - t.Fatalf("failed to close tar archive: %v", err) + c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } gzipCompressCmd := exec.Command("gzip", "test.tar.xz") gzipCompressCmd.Dir = tmpDir out, _, err = runCommandWithOutput(gzipCompressCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - t.Fatalf("failed to open destination dockerfile: %v", err) + c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() @@ -4324,15 +4086,13 @@ func TestBuildAddTarXzGz(t *testing.T) { defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) + c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } - logDone("build - ADD tar.xz.gz") } -func TestBuildFromGIT(t *testing.T) { +func (s *DockerSuite) TestBuildFromGIT(c *check.C) { name := "testbuildfromgit" - defer deleteImages(name) git, err := fakeGIT("repo", map[string]string{ "Dockerfile": `FROM busybox ADD first /first @@ -4341,321 +4101,290 @@ 
func TestBuildFromGIT(t *testing.T) { "first": "test git data", }, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer git.Close() _, err = buildImageFromPath(name, git.RepoURL, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Author") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != "docker" { - t.Fatalf("Maintainer should be docker, got %s", res) + c.Fatalf("Maintainer should be docker, got %s", res) } - logDone("build - build from GIT") } -func TestBuildCleanupCmdOnEntrypoint(t *testing.T) { +func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { name := "testbuildcmdcleanuponentrypoint" - defer deleteImages(name) if _, err := buildImage(name, `FROM scratch CMD ["test"] ENTRYPOINT ["echo"]`, true); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["cat"]`, name), true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectField(name, "Config.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if expected := ""; res != expected { - t.Fatalf("Cmd %s, expected %s", res, expected) + if res != "" { + c.Fatalf("Cmd %s, expected nil", res) } + res, err = inspectField(name, "Config.Entrypoint") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if expected := "[cat]"; res != expected { - t.Fatalf("Entrypoint %s, expected %s", res, expected) + if expected := "{[cat]}"; res != expected { + c.Fatalf("Entrypoint %s, expected %s", res, expected) } - logDone("build - cleanup cmd on ENTRYPOINT") } -func TestBuildClearCmd(t *testing.T) { +func (s *DockerSuite) TestBuildClearCmd(c *check.C) { name := "testbuildclearcmd" - defer deleteImages(name) _, err := buildImage(name, `From scratch ENTRYPOINT ["/bin/bash"] CMD []`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != "[]" { - t.Fatalf("Cmd %s, expected %s", res, "[]") + 
c.Fatalf("Cmd %s, expected %s", res, "[]") } - logDone("build - clearcmd") } -func TestBuildEmptyCmd(t *testing.T) { +func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { name := "testbuildemptycmd" - defer deleteImages(name) if _, err := buildImage(name, "FROM scratch\nMAINTAINER quux\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != "null" { - t.Fatalf("Cmd %s, expected %s", res, "null") + c.Fatalf("Cmd %s, expected %s", res, "null") } - logDone("build - empty cmd") } -func TestBuildOnBuildOutput(t *testing.T) { +func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { name := "testbuildonbuildparent" - defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } - childname := "testbuildonbuildchild" - defer deleteImages(childname) - _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "Trigger 0, RUN echo foo") { - t.Fatal("failed to find the ONBUILD output", out) + c.Fatal("failed to find the ONBUILD output", out) } - logDone("build - onbuild output") } -func TestBuildInvalidTag(t *testing.T) { +func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) - defer deleteImages(name) _, out, err := buildImageWithOut(name, "FROM scratch\nMAINTAINER quux\n", true) // if the error doesnt check for illegal tag name, or the image is built // then this should fail if !strings.Contains(out, "Illegal tag name") || strings.Contains(out, "Sending build context to Docker daemon") { - t.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) + c.Fatalf("failed to stop before building. 
Error: %s, Output: %s", err, out) } - logDone("build - invalid tag") } -func TestBuildCmdShDashC(t *testing.T) { +func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { name := "testbuildcmdshc" - defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { - t.Fatal(err, res) + c.Fatal(err, res) } expected := `["/bin/sh","-c","echo cmd"]` if res != expected { - t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } - logDone("build - cmd should have sh -c for non-json") } -func TestBuildCmdSpaces(t *testing.T) { +func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { // Test to make sure that when we strcat arrays we take into account // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't // look the same name := "testbuildcmdspaces" - defer deleteImages(name) var id1 string var id2 string var err error if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("Should not have resulted in the same CMD") + c.Fatal("Should not have resulted in the same CMD") } // Now do the same with ENTRYPOINT if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { - t.Fatal(err) + c.Fatal(err) } if id1 == id2 { - t.Fatal("Should not have resulted in the same ENTRYPOINT") + c.Fatal("Should not have resulted in the same ENTRYPOINT") } - logDone("build - cmd with spaces") } -func TestBuildCmdJSONNoShDashC(t *testing.T) { +func (s *DockerSuite) 
TestBuildCmdJSONNoShDashC(c *check.C) { name := "testbuildcmdjson" - defer deleteImages(name) if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Cmd") if err != nil { - t.Fatal(err, res) + c.Fatal(err, res) } expected := `["echo","cmd"]` if res != expected { - t.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) + c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } - logDone("build - cmd should not have /bin/sh -c for json") } -func TestBuildErrorInvalidInstruction(t *testing.T) { +func (s *DockerSuite) TestBuildErrorInvalidInstruction(c *check.C) { name := "testbuildignoreinvalidinstruction" - defer deleteImages(name) out, _, err := buildImageWithOut(name, "FROM busybox\nfoo bar", true) if err == nil { - t.Fatalf("Should have failed: %s", out) + c.Fatalf("Should have failed: %s", out) } - logDone("build - error invalid Dockerfile instruction") } -func TestBuildEntrypointInheritance(t *testing.T) { - defer deleteImages("parent", "child") - defer deleteAllContainers() +func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { if _, err := buildImage("parent", ` FROM busybox ENTRYPOINT exit 130 `, true); err != nil { - t.Fatal(err) + c.Fatal(err) } status, _ := runCommand(exec.Command(dockerBinary, "run", "parent")) if status != 130 { - t.Fatalf("expected exit code 130 but received %d", status) + c.Fatalf("expected exit code 130 but received %d", status) } if _, err := buildImage("child", ` FROM parent ENTRYPOINT exit 5 `, true); err != nil { - t.Fatal(err) + c.Fatal(err) } status, _ = runCommand(exec.Command(dockerBinary, "run", "child")) if status != 5 { - t.Fatalf("expected exit code 5 but received %d", status) + c.Fatalf("expected exit code 5 but received %d", status) } - logDone("build - clear entrypoint") } -func TestBuildEntrypointInheritanceInspect(t *testing.T) { +func (s *DockerSuite) 
TestBuildEntrypointInheritanceInspect(c *check.C) { var ( name = "testbuildepinherit" name2 = "testbuildepinherit2" expected = `["/bin/sh","-c","echo quux"]` ) - defer deleteImages(name, name2) - defer deleteAllContainers() - if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name2, "Config.Entrypoint") if err != nil { - t.Fatal(err, res) + c.Fatal(err, res) } if res != expected { - t.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) + c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-t", name2)) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } expected = "quux" if strings.TrimSpace(out) != expected { - t.Fatalf("Expected output is %s, got %s", expected, out) + c.Fatalf("Expected output is %s, got %s", expected, out) } - logDone("build - entrypoint override inheritance properly") } -func TestBuildRunShEntrypoint(t *testing.T) { +func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { name := "testbuildentrypoint" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox ENTRYPOINT /bin/echo`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - logDone("build - entrypoint with /bin/echo running successfully") } -func TestBuildExoticShellInterpolation(t *testing.T) { +func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { name := "testbuildexoticshellinterpolation" - defer deleteImages(name) _, err := buildImage(name, ` FROM busybox - + ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] @@ -4673,21 +4402,18 @@ func 
TestBuildExoticShellInterpolation(t *testing.T) { RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] `, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - exotic shell interpolation") } -func TestBuildVerifySingleQuoteFails(t *testing.T) { +func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { // This testcase is supposed to generate an error because the // JSON array we're passing in on the CMD uses single quotes instead // of double quotes (per the JSON spec). This means we interpret it // as a "string" insead of "JSON array" and pass it on to "sh -c" and // it should barf on it. name := "testbuildsinglequotefails" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, `FROM busybox @@ -4696,15 +4422,13 @@ func TestBuildVerifySingleQuoteFails(t *testing.T) { _, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err == nil { - t.Fatal("The image was not supposed to be able to run") + c.Fatal("The image was not supposed to be able to run") } - logDone("build - verify single quotes break the build") } -func TestBuildVerboseOut(t *testing.T) { +func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { name := "testbuildverboseout" - defer deleteImages(name) _, out, err := buildImageWithOut(name, `FROM busybox @@ -4712,87 +4436,81 @@ RUN echo 123`, false) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "\n123\n") { - t.Fatalf("Output should contain %q: %q", "123", out) + c.Fatalf("Output should contain %q: %q", "123", out) } - logDone("build - verbose output from commands") } -func TestBuildWithTabs(t *testing.T) { +func (s *DockerSuite) TestBuildWithTabs(c *check.C) { name := "testbuildwithtabs" - defer deleteImages(name) _, err := buildImage(name, "FROM busybox\nRUN echo\tone\t\ttwo", true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "ContainerConfig.Cmd") if err != nil { - t.Fatal(err) + c.Fatal(err) 
} expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates if res != expected1 && res != expected2 { - t.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) + c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) } - logDone("build - with tabs") } -func TestBuildLabels(t *testing.T) { +func (s *DockerSuite) TestBuildLabels(c *check.C) { name := "testbuildlabel" expected := `{"License":"GPL","Vendor":"Acme"}` - defer deleteImages(name) _, err := buildImage(name, `FROM busybox LABEL Vendor=Acme LABEL License GPL`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } res, err := inspectFieldJSON(name, "Config.Labels") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != expected { - t.Fatalf("Labels %s, expected %s", res, expected) + c.Fatalf("Labels %s, expected %s", res, expected) } - logDone("build - label") } -func TestBuildLabelsCache(t *testing.T) { +func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { name := "testbuildlabelcache" - defer deleteImages(name) id1, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, false) if err != nil { - t.Fatalf("Build 1 should have worked: %v", err) + c.Fatalf("Build 1 should have worked: %v", err) } id2, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, true) if err != nil || id1 != id2 { - t.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) + c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor=Acme1`, true) if err != nil || id1 == id2 { - t.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) + c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor Acme`, true) // Note: " " and "=" should be same if err != nil || id1 != id2 { - 
t.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) + c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) } // Now make sure the cache isn't used by mistake @@ -4800,28 +4518,26 @@ func TestBuildLabelsCache(t *testing.T) { `FROM busybox LABEL f1=b1 f2=b2`, false) if err != nil { - t.Fatalf("Build 5 should have worked: %q", err) + c.Fatalf("Build 5 should have worked: %q", err) } id2, err = buildImage(name, `FROM busybox LABEL f1="b1 f2=b2"`, true) if err != nil || id1 == id2 { - t.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) + c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) } - logDone("build - label cache") } -func TestBuildStderr(t *testing.T) { +func (s *DockerSuite) TestBuildStderr(c *check.C) { // This test just makes sure that no non-error output goes // to stderr name := "testbuildstderr" - defer deleteImages(name) _, _, stderr, err := buildImageWithStdoutStderr(name, "FROM busybox\nRUN echo one", true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if runtime.GOOS == "windows" { @@ -4829,22 +4545,20 @@ func TestBuildStderr(t *testing.T) { lines := strings.Split(stderr, "\n") for _, v := range lines { if v != "" && !strings.Contains(v, "SECURITY WARNING:") { - t.Fatalf("Stderr contains unexpected output line: %q", v) + c.Fatalf("Stderr contains unexpected output line: %q", v) } } } else { if stderr != "" { - t.Fatalf("Stderr should have been empty, instead its: %q", stderr) + c.Fatalf("Stderr should have been empty, instead its: %q", stderr) } } - logDone("build - testing stderr") } -func TestBuildChownSingleFile(t *testing.T) { - testRequires(t, UnixCli) // test uses chown: not available on windows +func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { + testRequires(c, UnixCli) // test uses chown: not available on windows name := "testbuildchownsinglefile" - defer deleteImages(name) ctx, err := fakeContext(` FROM busybox 
@@ -4855,46 +4569,45 @@ RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] "test": "test", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - change permission on single file") } -func TestBuildSymlinkBreakout(t *testing.T) { +func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { name := "testbuildsymlinkbreakout" tmpdir, err := ioutil.TempDir("", name) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` from busybox add symlink.tar / add inject /symlink/ `), 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } inject := filepath.Join(ctx, "inject") if err := ioutil.WriteFile(inject, nil, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } f, err := os.Create(filepath.Join(ctx, "symlink.tar")) if err != nil { - t.Fatal(err) + c.Fatal(err) } w := tar.NewWriter(f) w.WriteHeader(&tar.Header{ @@ -4914,19 +4627,17 @@ func TestBuildSymlinkBreakout(t *testing.T) { w.Close() f.Close() if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { - t.Fatal("symlink breakout - inject") + c.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { - t.Fatalf("unexpected error: %v", err) + c.Fatalf("unexpected error: %v", err) } - logDone("build - symlink breakout") } -func TestBuildXZHost(t *testing.T) { +func (s *DockerSuite) TestBuildXZHost(c *check.C) { name := "testbuildxzhost" - defer deleteImages(name) ctx, err := fakeContext(` FROM busybox @@ -4942,23 +4653,21 @@ RUN [ ! 
-e /injected ]`, }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("build - xz host is being used") } -func TestBuildVolumesRetainContents(t *testing.T) { +func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { var ( name = "testbuildvolumescontent" expected = "some text" ) - defer deleteImages(name) ctx, err := fakeContext(` FROM busybox COPY content /foo/file @@ -4968,27 +4677,25 @@ CMD cat /foo/file`, "content": expected, }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, false); err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", name)) if err != nil { - t.Fatal(err) + c.Fatal(err) } if out != expected { - t.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) + c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) } - logDone("build - volumes retain contents in build") } -func TestBuildRenamedDockerfile(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, @@ -5001,100 +4708,98 @@ func TestBuildRenamedDockerfile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { - t.Fatalf("Failed to build: %s\n%s", out, err) + c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { - t.Fatalf("test1 should have used Dockerfile, output:%s", out) + c.Fatalf("test1 should have used Dockerfile, output:%s", out) } - out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "-f", filepath.Join("files", 
"Dockerfile"), "-t", "test2", ".") + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "from files/Dockerfile") { - t.Fatalf("test2 should have used files/Dockerfile, output:%s", out) + c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) } - out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "from files/dFile") { - t.Fatalf("test3 should have used files/dFile, output:%s", out) + c.Fatalf("test3 should have used files/dFile, output:%s", out) } - out, _, err = dockerCmdInDir(t, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, "from dFile") { - t.Fatalf("test4 should have used dFile, output:%s", out) + c.Fatalf("test4 should have used dFile, output:%s", out) } dirWithNoDockerfile, _ := ioutil.TempDir(os.TempDir(), "test5") nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") if _, err = os.Create(nonDockerfileFile); err != nil { - t.Fatal(err) + c.Fatal(err) } - out, _, err = dockerCmdInDir(t, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") + out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") if err == nil { - t.Fatalf("test5 was supposed to fail to find passwd") + c.Fatalf("test5 was supposed to fail to find passwd") } if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", strings.Replace(nonDockerfileFile, `\`, `\\`, -1)); 
!strings.Contains(out, expected) { - t.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected) + c.Fatalf("wrong error messsage:%v\nexpected to contain=%v", out, expected) } - out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") if err != nil { - t.Fatalf("test6 failed: %s", err) + c.Fatalf("test6 failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { - t.Fatalf("test6 should have used root Dockerfile, output:%s", out) + c.Fatalf("test6 should have used root Dockerfile, output:%s", out) } - out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") if err != nil { - t.Fatalf("test7 failed: %s", err) + c.Fatalf("test7 failed: %s", err) } if !strings.Contains(out, "from files/Dockerfile") { - t.Fatalf("test7 should have used files Dockerfile, output:%s", out) + c.Fatalf("test7 should have used files Dockerfile, output:%s", out) } - out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") if err == nil || !strings.Contains(out, "must be within the build context") { - t.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) + c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) } tmpDir := os.TempDir() - out, _, err = dockerCmdInDir(t, tmpDir, "build", "-t", "test9", ctx.Dir) + out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", 
"test9", ctx.Dir) if err != nil { - t.Fatalf("test9 - failed: %s", err) + c.Fatalf("test9 - failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { - t.Fatalf("test9 should have used root Dockerfile, output:%s", out) + c.Fatalf("test9 should have used root Dockerfile, output:%s", out) } - out, _, err = dockerCmdInDir(t, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") + out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") if err != nil { - t.Fatalf("test10 should have worked: %s", err) + c.Fatalf("test10 should have worked: %s", err) } if !strings.Contains(out, "from files/dFile2") { - t.Fatalf("test10 should have used files/dFile2, output:%s", out) + c.Fatalf("test10 should have used files/dFile2, output:%s", out) } - logDone("build - rename dockerfile") } -func TestBuildFromMixedcaseDockerfile(t *testing.T) { - testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows - defer deleteImages("test1") +func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows ctx, err := fakeContext(`FROM busybox RUN echo from dockerfile`, @@ -5103,24 +4808,22 @@ func TestBuildFromMixedcaseDockerfile(t *testing.T) { }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { - t.Fatalf("Failed to build: %s\n%s", out, err) + c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from dockerfile") { - t.Fatalf("Missing proper output: %s", out) + c.Fatalf("Missing proper output: %s", out) } - logDone("build - mixedcase Dockerfile") } -func TestBuildWithTwoDockerfiles(t *testing.T) { - testRequires(t, UnixCli) // Dockerfile overwrites dockerfile on windows - defer deleteImages("test1") +func (s 
*DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { + testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, @@ -5129,30 +4832,28 @@ RUN echo from Dockerfile`, }) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-t", "test1", ".") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { - t.Fatalf("Failed to build: %s\n%s", out, err) + c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { - t.Fatalf("Missing proper output: %s", out) + c.Fatalf("Missing proper output: %s", out) } - logDone("build - two Dockerfiles") } -func TestBuildFromURLWithF(t *testing.T) { - defer deleteImages("test1") +func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { server, err := fakeStorage(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer server.Close() @@ -5161,34 +4862,32 @@ RUN echo from Dockerfile`, map[string]string{}) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir - out, _, err := dockerCmdInDir(t, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") + out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") if err != nil { - t.Fatalf("Failed to build: %s\n%s", out, err) + c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { - t.Fatalf("Missing proper output: %s", out) + c.Fatalf("Missing proper output: %s", out) } - logDone("build - from URL with -f") } -func TestBuildFromStdinWithF(t *testing.T) { - defer deleteImages("test1") +func (s *DockerSuite) 
TestBuildFromStdinWithF(c *check.C) { ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) defer ctx.Close() if err != nil { - t.Fatal(err) + c.Fatal(err) } // Make sure that -f is ignored and that we don't use the Dockerfile @@ -5201,19 +4900,18 @@ COPY * /tmp/ RUN find /tmp/`) out, status, err := runCommandWithOutput(dockerCommand) if err != nil || status != 0 { - t.Fatalf("Error building: %s", err) + c.Fatalf("Error building: %s", err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { - t.Fatalf("Missing proper output: %s", out) + c.Fatalf("Missing proper output: %s", out) } - logDone("build - from stdin with -f") } -func TestBuildFromOfficialNames(t *testing.T) { +func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { name := "testbuildfromofficial" fromNames := []string{ "busybox", @@ -5227,45 +4925,44 @@ func TestBuildFromOfficialNames(t *testing.T) { imgName := fmt.Sprintf("%s%d", name, idx) _, err := buildImage(imgName, "FROM "+fromName, true) if err != nil { - t.Errorf("Build failed using FROM %s: %s", fromName, err) + c.Errorf("Build failed using FROM %s: %s", fromName, err) } deleteImages(imgName) } - logDone("build - from official names") } -func TestBuildDockerfileOutsideContext(t *testing.T) { - testRequires(t, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) +func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { + testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) name := "testbuilddockerfileoutsidecontext" tmpdir, err := ioutil.TempDir("", name) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X 
Y"), 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } wd, err := os.Getwd() if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.Chdir(wd) if err := os.Chdir(ctx); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { - t.Fatal(err) + c.Fatal(err) } for _, dockerfilePath := range []string{ @@ -5275,10 +4972,10 @@ func TestBuildDockerfileOutsideContext(t *testing.T) { } { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")) if err == nil { - t.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) + c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) } if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { - t.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) + c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) } deleteImages(name) } @@ -5289,14 +4986,11 @@ func TestBuildDockerfileOutsideContext(t *testing.T) { // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)) if err == nil { - t.Fatalf("Expected error. Out: %s", out) + c.Fatalf("Expected error. 
Out: %s", out) } - deleteImages(name) - - logDone("build - Dockerfile outside context") } -func TestBuildSpaces(t *testing.T) { +func (s *DockerSuite) TestBuildSpaces(c *check.C) { // Test to make sure that leading/trailing spaces on a command // doesn't change the error msg we get var ( @@ -5305,23 +4999,22 @@ func TestBuildSpaces(t *testing.T) { ) name := "testspaces" - defer deleteImages(name) ctx, err := fakeContext("FROM busybox\nCOPY\n", map[string]string{ "Dockerfile": "FROM busybox\nCOPY\n", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { - t.Fatal("Build 1 was supposed to fail, but didn't") + c.Fatal("Build 1 was supposed to fail, but didn't") } ctx.Add("Dockerfile", "FROM busybox\nCOPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - t.Fatal("Build 2 was supposed to fail, but didn't") + c.Fatal("Build 2 was supposed to fail, but didn't") } removeLogTimestamps := func(s string) string { @@ -5334,12 +5027,12 @@ func TestBuildSpaces(t *testing.T) { // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - t.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) + c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - t.Fatal("Build 3 was supposed to fail, but didn't") + c.Fatal("Build 3 was supposed to fail, but didn't") } // Skip over the times @@ -5348,12 +5041,12 @@ func TestBuildSpaces(t *testing.T) { // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - t.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) + c.Fatalf("Build 3's error wasn't the same as build 
1's\n1:%s\n3:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - t.Fatal("Build 4 was supposed to fail, but didn't") + c.Fatal("Build 4 was supposed to fail, but didn't") } // Skip over the times @@ -5362,16 +5055,14 @@ func TestBuildSpaces(t *testing.T) { // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - t.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) + c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) } - logDone("build - test spaces") } -func TestBuildSpacesWithQuotes(t *testing.T) { +func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { // Test to make sure that spaces in quotes aren't lost name := "testspacesquotes" - defer deleteImages(name) dockerfile := `FROM busybox RUN echo " \ @@ -5379,19 +5070,18 @@ RUN echo " \ _, out, err := buildImageWithOut(name, dockerfile, false) if err != nil { - t.Fatal("Build failed:", err) + c.Fatal("Build failed:", err) } expecting := "\n foo \n" if !strings.Contains(out, expecting) { - t.Fatalf("Bad output: %q expecting to contian %q", out, expecting) + c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) } - logDone("build - test spaces with quotes") } // #4393 -func TestBuildVolumeFileExistsinContainer(t *testing.T) { +func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") buildCmd.Stdin = strings.NewReader(` FROM busybox @@ -5401,13 +5091,12 @@ func TestBuildVolumeFileExistsinContainer(t *testing.T) { out, _, err := runCommandWithOutput(buildCmd) if err == nil || !strings.Contains(out, "file exists") { - t.Fatalf("expected build to fail when file exists in container at requested volume path") + c.Fatalf("expected build to fail when file exists 
in container at requested volume path") } - logDone("build - errors when volume is specified where a file exists") } -func TestBuildMissingArgs(t *testing.T) { +func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { // Test to make sure that all Dockerfile commands (except the ones listed // in skipCmds) will generate an error if no args are provided. // Note: INSERT is deprecated so we exclude it because of that. @@ -5418,8 +5107,6 @@ func TestBuildMissingArgs(t *testing.T) { "INSERT": {}, } - defer deleteAllContainers() - for cmd := range command.Commands { cmd = strings.ToUpper(cmd) if _, ok := skipCmds[cmd]; ok { @@ -5436,57 +5123,50 @@ func TestBuildMissingArgs(t *testing.T) { ctx, err := fakeContext(dockerfile, map[string]string{}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() var out string if out, err = buildImageFromContext("args", ctx, true); err == nil { - t.Fatalf("%s was supposed to fail. Out:%s", cmd, out) + c.Fatalf("%s was supposed to fail. Out:%s", cmd, out) } if !strings.Contains(err.Error(), cmd+" requires") { - t.Fatalf("%s returned the wrong type of error:%s", cmd, err) + c.Fatalf("%s returned the wrong type of error:%s", cmd, err) } } - logDone("build - verify missing args") } -func TestBuildEmptyScratch(t *testing.T) { - defer deleteImages("sc") +func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { _, out, err := buildImageWithOut("sc", "FROM scratch", true) if err == nil { - t.Fatalf("Build was supposed to fail") + c.Fatalf("Build was supposed to fail") } if !strings.Contains(out, "No image was generated") { - t.Fatalf("Wrong error message: %v", out) + c.Fatalf("Wrong error message: %v", out) } - logDone("build - empty scratch Dockerfile") } -func TestBuildDotDotFile(t *testing.T) { - defer deleteImages("sc") +func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { ctx, err := fakeContext("FROM busybox\n", map[string]string{ "..gitme": "", }) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() if _, 
err = buildImageFromContext("sc", ctx, false); err != nil { - t.Fatalf("Build was supposed to work: %s", err) + c.Fatalf("Build was supposed to work: %s", err) } - logDone("build - ..file") } -func TestBuildNotVerbose(t *testing.T) { - defer deleteAllContainers() - defer deleteImages("verbose") +func (s *DockerSuite) TestBuildNotVerbose(c *check.C) { ctx, err := fakeContext("FROM busybox\nENV abc=hi\nRUN echo $abc there", map[string]string{}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() @@ -5495,10 +5175,10 @@ func TestBuildNotVerbose(t *testing.T) { buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err != nil { - t.Fatalf("failed to build the image w/o -q: %s, %v", out, err) + c.Fatalf("failed to build the image w/o -q: %s, %v", out, err) } if !strings.Contains(out, "hi there") { - t.Fatalf("missing output:%s\n", out) + c.Fatalf("missing output:%s\n", out) } // Now do it w/o verbose @@ -5506,25 +5186,21 @@ func TestBuildNotVerbose(t *testing.T) { buildCmd.Dir = ctx.Dir out, _, err = runCommandWithOutput(buildCmd) if err != nil { - t.Fatalf("failed to build the image w/ -q: %s, %v", out, err) + c.Fatalf("failed to build the image w/ -q: %s, %v", out, err) } if strings.Contains(out, "hi there") { - t.Fatalf("Bad output, should not contain 'hi there':%s", out) + c.Fatalf("Bad output, should not contain 'hi there':%s", out) } - logDone("build - not verbose") } -func TestBuildRUNoneJSON(t *testing.T) { +func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { name := "testbuildrunonejson" - defer deleteAllContainers() - defer deleteImages(name, "hello-world") - ctx, err := fakeContext(`FROM hello-world:frozen RUN [ "/hello" ]`, map[string]string{}) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer ctx.Close() @@ -5532,90 +5208,80 @@ RUN [ "/hello" ]`, map[string]string{}) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err != nil { - t.Fatalf("failed to build the image: %s, %v", out, err) + 
c.Fatalf("failed to build the image: %s, %v", out, err) } if !strings.Contains(out, "Hello from Docker") { - t.Fatalf("bad output: %s", out) + c.Fatalf("bad output: %s", out) } - logDone("build - RUN with one JSON arg") } -func TestBuildResourceConstraintsAreUsed(t *testing.T) { +func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { name := "testbuildresourceconstraints" - defer deleteAllContainers() - defer deleteImages(name, "hello-world") ctx, err := fakeContext(` FROM hello-world:frozen RUN ["/hello"] `, map[string]string{}) if err != nil { - t.Fatal(err) + c.Fatal(err) } - cmd := exec.Command(dockerBinary, "build", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpu-shares=100", "-t", name, ".") + cmd := exec.Command(dockerBinary, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "-t", name, ".") cmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) - } - out, _, err = dockerCmd(t, "ps", "-lq") - if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } + out, _ = dockerCmd(c, "ps", "-lq") cID := strings.TrimSpace(out) type hostConfig struct { - Memory float64 // Use float64 here since the json decoder sees it that way - MemorySwap int + Memory int64 + MemorySwap int64 CpusetCpus string - CpuShares int + CpusetMems string + CpuShares int64 + CpuQuota int64 } cfg, err := inspectFieldJSON(cID, "HostConfig") if err != nil { - t.Fatal(err) + c.Fatal(err) } var c1 hostConfig if err := json.Unmarshal([]byte(cfg), &c1); err != nil { - t.Fatal(err, cfg) + c.Fatal(err, cfg) } - mem := int64(c1.Memory) - if mem != 67108864 || c1.MemorySwap != -1 || c1.CpusetCpus != "0" || c1.CpuShares != 100 { - t.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d", - mem, c1.MemorySwap, c1.CpusetCpus, c1.CpuShares) + if c1.Memory != 67108864 || 
c1.MemorySwap != -1 || c1.CpusetCpus != "0" || c1.CpusetMems != "0" || c1.CpuShares != 100 || c1.CpuQuota != 8000 { + c.Fatalf("resource constraints not set properly:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CpuShares: %d, CpuQuota: %d", + c1.Memory, c1.MemorySwap, c1.CpusetCpus, c1.CpusetMems, c1.CpuShares, c1.CpuQuota) } // Make sure constraints aren't saved to image - _, _, err = dockerCmd(t, "run", "--name=test", name) - if err != nil { - t.Fatal(err) - } + _, _ = dockerCmd(c, "run", "--name=test", name) + cfg, err = inspectFieldJSON("test", "HostConfig") if err != nil { - t.Fatal(err) + c.Fatal(err) } var c2 hostConfig if err := json.Unmarshal([]byte(cfg), &c2); err != nil { - t.Fatal(err, cfg) + c.Fatal(err, cfg) } - mem = int64(c2.Memory) - if mem == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "0" || c2.CpuShares == 100 { - t.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpuShares: %d", - mem, c2.MemorySwap, c2.CpusetCpus, c2.CpuShares) + if c2.Memory == 67108864 || c2.MemorySwap == -1 || c2.CpusetCpus == "0" || c2.CpusetMems == "0" || c2.CpuShares == 100 || c2.CpuQuota == 8000 { + c.Fatalf("resource constraints leaked from build:\nMemory: %d, MemSwap: %d, CpusetCpus: %s, CpusetMems: %s, CpuShares: %d, CpuQuota: %d", + c2.Memory, c2.MemorySwap, c2.CpusetCpus, c2.CpusetMems, c2.CpuShares, c2.CpuQuota) } - logDone("build - resource constraints applied") } -func TestBuildEmptyStringVolume(t *testing.T) { +func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { name := "testbuildemptystringvolume" - defer deleteImages(name) _, err := buildImage(name, ` FROM busybox @@ -5623,8 +5289,7 @@ func TestBuildEmptyStringVolume(t *testing.T) { VOLUME $foo `, false) if err == nil { - t.Fatal("Should have failed to build") + c.Fatal("Should have failed to build") } - logDone("build - empty string volume") } diff --git a/integration-cli/docker_cli_by_digest_test.go 
b/integration-cli/docker_cli_by_digest_test.go index 24ebf0cf70b7c..b9b319cf94a41 100644 --- a/integration-cli/docker_cli_by_digest_test.go +++ b/integration-cli/docker_cli_by_digest_test.go @@ -5,9 +5,9 @@ import ( "os/exec" "regexp" "strings" - "testing" "github.com/docker/docker/utils" + "github.com/go-check/check" ) var ( @@ -22,18 +22,17 @@ func setupImage() (string, error) { func setupImageWithTag(tag string) (string, error) { containerName := "busyboxbydigest" - c := exec.Command(dockerBinary, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox") - if _, err := runCommand(c); err != nil { + cmd := exec.Command(dockerBinary, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox") + if _, err := runCommand(cmd); err != nil { return "", err } // tag the image to upload it to the private registry repoAndTag := utils.ImageReference(repoName, tag) - c = exec.Command(dockerBinary, "commit", containerName, repoAndTag) - if out, _, err := runCommandWithOutput(c); err != nil { + cmd = exec.Command(dockerBinary, "commit", containerName, repoAndTag) + if out, _, err := runCommandWithOutput(cmd); err != nil { return "", fmt.Errorf("image tagging failed: %s, %v", out, err) } - defer deleteImages(repoAndTag) // delete the container as we don't need it any more if err := deleteContainer(containerName); err != nil { @@ -41,15 +40,15 @@ func setupImageWithTag(tag string) (string, error) { } // push the image - c = exec.Command(dockerBinary, "push", repoAndTag) - out, _, err := runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "push", repoAndTag) + out, _, err := runCommandWithOutput(cmd) if err != nil { return "", fmt.Errorf("pushing the image to the private registry has failed: %s, %v", out, err) } // delete our local repo that we previously tagged - c = exec.Command(dockerBinary, "rmi", repoAndTag) - if out, _, err := runCommandWithOutput(c); err != nil { + cmd = exec.Command(dockerBinary, "rmi", repoAndTag) + if out, _, err := 
runCommandWithOutput(cmd); err != nil { return "", fmt.Errorf("error deleting images prior to real test: %s, %v", out, err) } @@ -63,473 +62,426 @@ func setupImageWithTag(tag string) (string, error) { return pushDigest, nil } -func TestPullByTagDisplaysDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { pushDigest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } // pull from the registry using the tag - c := exec.Command(dockerBinary, "pull", repoName) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", repoName) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by tag: %s, %v", out, err) + c.Fatalf("error pulling by tag: %s, %v", out, err) } - defer deleteImages(repoName) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) if len(matches) != 2 { - t.Fatalf("unable to parse digest from pull output: %s", out) + c.Fatalf("unable to parse digest from pull output: %s", out) } pullDigest := matches[1] // make sure the pushed and pull digests match if pushDigest != pullDigest { - t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) + c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) } - - logDone("by_digest - pull by tag displays digest") } -func TestPullByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { pushDigest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, 
"pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } - defer deleteImages(imageReference) // the pull output includes "Digest: ", so find that matches := digestRegex.FindStringSubmatch(out) if len(matches) != 2 { - t.Fatalf("unable to parse digest from pull output: %s", out) + c.Fatalf("unable to parse digest from pull output: %s", out) } pullDigest := matches[1] // make sure the pushed and pull digests match if pushDigest != pullDigest { - t.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) + c.Fatalf("push digest %q didn't match pull digest %q", pushDigest, pullDigest) } - - logDone("by_digest - pull by digest") } -func TestCreateByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { pushDigest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "createByDigest" - c := exec.Command(dockerBinary, "create", "--name", containerName, imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "create", "--name", containerName, imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error creating by digest: %s, %v", out, err) + c.Fatalf("error creating by digest: %s, %v", out, err) } - defer deleteContainer(containerName) res, err := inspectField(containerName, "Config.Image") if err != nil { - t.Fatalf("failed to get Config.Image: %s, %v", out, err) + c.Fatalf("failed to get Config.Image: %s, %v", out, err) } if res != imageReference { - t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) + c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) } - - logDone("by_digest - create 
by digest") } -func TestRunByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { pushDigest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) containerName := "runByDigest" - c := exec.Command(dockerBinary, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error run by digest: %s, %v", out, err) + c.Fatalf("error run by digest: %s, %v", out, err) } - defer deleteContainer(containerName) foundRegex := regexp.MustCompile("found=([^\n]+)") matches := foundRegex.FindStringSubmatch(out) if len(matches) != 2 { - t.Fatalf("error locating expected 'found=1' output: %s", out) + c.Fatalf("error locating expected 'found=1' output: %s", out) } if matches[1] != "1" { - t.Fatalf("Expected %q, got %q", "1", matches[1]) + c.Fatalf("Expected %q, got %q", "1", matches[1]) } res, err := inspectField(containerName, "Config.Image") if err != nil { - t.Fatalf("failed to get Config.Image: %s, %v", out, err) + c.Fatalf("failed to get Config.Image: %s, %v", out, err) } if res != imageReference { - t.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) + c.Fatalf("unexpected Config.Image: %s (expected %s)", res, imageReference) } - - logDone("by_digest - run by digest") } -func TestRemoveImageByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { digest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull 
from the registry using the @ reference - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // make sure inspect runs ok if _, err := inspectField(imageReference, "Id"); err != nil { - t.Fatalf("failed to inspect image: %v", err) + c.Fatalf("failed to inspect image: %v", err) } // do the delete if err := deleteImages(imageReference); err != nil { - t.Fatalf("unexpected error deleting image: %v", err) + c.Fatalf("unexpected error deleting image: %v", err) } // try to inspect again - it should error this time if _, err := inspectField(imageReference, "Id"); err == nil { - t.Fatalf("unexpected nil err trying to inspect what should be a non-existent image") + c.Fatalf("unexpected nil err trying to inspect what should be a non-existent image") } else if !strings.Contains(err.Error(), "No such image") { - t.Fatalf("expected 'No such image' output, got %v", err) + c.Fatalf("expected 'No such image' output, got %v", err) } - - logDone("by_digest - remove image by digest") } -func TestBuildByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { digest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // get the image id imageID, 
err := inspectField(imageReference, "Id") if err != nil { - t.Fatalf("error getting image id: %v", err) + c.Fatalf("error getting image id: %v", err) } // do the build name := "buildbydigest" - defer deleteImages(name) _, err = buildImage(name, fmt.Sprintf( `FROM %s CMD ["/bin/echo", "Hello World"]`, imageReference), true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // get the build's image id res, err := inspectField(name, "Config.Image") if err != nil { - t.Fatal(err) + c.Fatal(err) } // make sure they match if res != imageID { - t.Fatalf("Image %s, expected %s", res, imageID) + c.Fatalf("Image %s, expected %s", res, imageID) } - - logDone("by_digest - build by digest") } -func TestTagByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { digest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // tag it tag := "tagbydigest" - c = exec.Command(dockerBinary, "tag", imageReference, tag) - if _, err := runCommand(c); err != nil { - t.Fatalf("unexpected error tagging: %v", err) + cmd = exec.Command(dockerBinary, "tag", imageReference, tag) + if _, err := runCommand(cmd); err != nil { + c.Fatalf("unexpected error tagging: %v", err) } expectedID, err := inspectField(imageReference, "Id") if err != nil { - t.Fatalf("error getting original image id: %v", err) + c.Fatalf("error getting original image id: %v", err) } tagID, err := inspectField(tag, "Id") if err != nil { - t.Fatalf("error getting tagged 
image id: %v", err) + c.Fatalf("error getting tagged image id: %v", err) } if tagID != expectedID { - t.Fatalf("expected image id %q, got %q", expectedID, tagID) + c.Fatalf("expected image id %q, got %q", expectedID, tagID) } - - logDone("by_digest - tag by digest") } -func TestListImagesWithoutDigests(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { digest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference := fmt.Sprintf("%s@%s", repoName, digest) // pull from the registry using the @ reference - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } - c = exec.Command(dockerBinary, "images") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } if strings.Contains(out, "DIGEST") { - t.Fatalf("list output should not have contained DIGEST header: %s", out) + c.Fatalf("list output should not have contained DIGEST header: %s", out) } - logDone("by_digest - list images - digest header not displayed by default") } -func TestListImagesWithDigests(t *testing.T) { - defer setupRegistry(t)() - defer deleteImages(repoName+":tag1", repoName+":tag2") +func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { // setup image1 digest1, err := setupImageWithTag("tag1") if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) - defer 
deleteImages(imageReference1) - t.Logf("imageReference1 = %s", imageReference1) + c.Logf("imageReference1 = %s", imageReference1) // pull image1 by digest - c := exec.Command(dockerBinary, "pull", imageReference1) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference1) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // list images - c = exec.Command(dockerBinary, "images", "--digests") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "--digests") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } // make sure repo shown, tag=, digest = $digest1 re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1 + `\s`) if !re1.MatchString(out) { - t.Fatalf("expected %q: %s", re1.String(), out) + c.Fatalf("expected %q: %s", re1.String(), out) } // setup image2 digest2, err := setupImageWithTag("tag2") if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) - defer deleteImages(imageReference2) - t.Logf("imageReference2 = %s", imageReference2) + c.Logf("imageReference2 = %s", imageReference2) // pull image1 by digest - c = exec.Command(dockerBinary, "pull", imageReference1) - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "pull", imageReference1) + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // pull image2 by digest - c = exec.Command(dockerBinary, "pull", imageReference2) - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "pull", imageReference2) + out, _, err = 
runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // list images - c = exec.Command(dockerBinary, "images", "--digests") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "--digests") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } // make sure repo shown, tag=, digest = $digest1 if !re1.MatchString(out) { - t.Fatalf("expected %q: %s", re1.String(), out) + c.Fatalf("expected %q: %s", re1.String(), out) } // make sure repo shown, tag=, digest = $digest2 re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2 + `\s`) if !re2.MatchString(out) { - t.Fatalf("expected %q: %s", re2.String(), out) + c.Fatalf("expected %q: %s", re2.String(), out) } // pull tag1 - c = exec.Command(dockerBinary, "pull", repoName+":tag1") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "pull", repoName+":tag1") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling tag1: %s, %v", out, err) + c.Fatalf("error pulling tag1: %s, %v", out, err) } // list images - c = exec.Command(dockerBinary, "images", "--digests") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "--digests") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } // make sure image 1 has repo, tag, AND repo, , digest reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*\s`) reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1 + `\s`) if !reWithTag1.MatchString(out) { - t.Fatalf("expected %q: %s", reWithTag1.String(), out) + c.Fatalf("expected %q: %s", reWithTag1.String(), out) } if !reWithDigest1.MatchString(out) { - t.Fatalf("expected %q: 
%s", reWithDigest1.String(), out) + c.Fatalf("expected %q: %s", reWithDigest1.String(), out) } // make sure image 2 has repo, , digest if !re2.MatchString(out) { - t.Fatalf("expected %q: %s", re2.String(), out) + c.Fatalf("expected %q: %s", re2.String(), out) } // pull tag 2 - c = exec.Command(dockerBinary, "pull", repoName+":tag2") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "pull", repoName+":tag2") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling tag2: %s, %v", out, err) + c.Fatalf("error pulling tag2: %s, %v", out, err) } // list images - c = exec.Command(dockerBinary, "images", "--digests") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "--digests") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } // make sure image 1 has repo, tag, digest if !reWithTag1.MatchString(out) { - t.Fatalf("expected %q: %s", re1.String(), out) + c.Fatalf("expected %q: %s", re1.String(), out) } // make sure image 2 has repo, tag, digest reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*\s`) reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2 + `\s`) if !reWithTag2.MatchString(out) { - t.Fatalf("expected %q: %s", reWithTag2.String(), out) + c.Fatalf("expected %q: %s", reWithTag2.String(), out) } if !reWithDigest2.MatchString(out) { - t.Fatalf("expected %q: %s", reWithDigest2.String(), out) + c.Fatalf("expected %q: %s", reWithDigest2.String(), out) } // list images - c = exec.Command(dockerBinary, "images", "--digests") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "--digests") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error listing images: %s, %v", out, err) + c.Fatalf("error listing images: %s, %v", out, err) } // make sure image 1 has repo, tag, digest if 
!reWithTag1.MatchString(out) { - t.Fatalf("expected %q: %s", re1.String(), out) + c.Fatalf("expected %q: %s", re1.String(), out) } // make sure image 2 has repo, tag, digest if !reWithTag2.MatchString(out) { - t.Fatalf("expected %q: %s", re2.String(), out) + c.Fatalf("expected %q: %s", re2.String(), out) } // make sure busybox has tag, but not digest busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) if !busyboxRe.MatchString(out) { - t.Fatalf("expected %q: %s", busyboxRe.String(), out) + c.Fatalf("expected %q: %s", busyboxRe.String(), out) } - - logDone("by_digest - list images with digests") } -func TestDeleteImageByIDOnlyPulledByDigest(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { pushDigest, err := setupImage() if err != nil { - t.Fatalf("error setting up image: %v", err) + c.Fatalf("error setting up image: %v", err) } // pull from the registry using the @ reference imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - c := exec.Command(dockerBinary, "pull", imageReference) - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "pull", imageReference) + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error pulling by digest: %s, %v", out, err) + c.Fatalf("error pulling by digest: %s, %v", out, err) } // just in case... 
- defer deleteImages(imageReference) imageID, err := inspectField(imageReference, ".Id") if err != nil { - t.Fatalf("error inspecting image id: %v", err) + c.Fatalf("error inspecting image id: %v", err) } - c = exec.Command(dockerBinary, "rmi", imageID) - if _, err := runCommand(c); err != nil { - t.Fatalf("error deleting image by id: %v", err) + cmd = exec.Command(dockerBinary, "rmi", imageID) + if _, err := runCommand(cmd); err != nil { + c.Fatalf("error deleting image by id: %v", err) } - - logDone("by_digest - delete image by id only pulled by digest") } diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go index 3143c21fcdf11..391cd4ebc5a34 100644 --- a/integration-cli/docker_cli_commit_test.go +++ b/integration-cli/docker_cli_commit_test.go @@ -3,149 +3,132 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestCommitAfterContainerIsDone(t *testing.T) { +func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %s, %v", out, err) + c.Fatalf("failed to run container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) if _, _, err = runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + c.Fatalf("error thrown while waiting for container: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { - t.Fatalf("failed to commit container to image: %s, %v", out, err) + c.Fatalf("failed to commit container to image: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", 
cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("failed to inspect image: %s, %v", out, err) + c.Fatalf("failed to inspect image: %s, %v", out, err) } - - deleteContainer(cleanedContainerID) - deleteImages(cleanedImageID) - - logDone("commit - echo foo and commit the image") } -func TestCommitWithoutPause(t *testing.T) { +func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %s, %v", out, err) + c.Fatalf("failed to run container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) waitCmd := exec.Command(dockerBinary, "wait", cleanedContainerID) if _, _, err = runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("error thrown while waiting for container: %s, %v", out, err) + c.Fatalf("error thrown while waiting for container: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", "-p=false", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { - t.Fatalf("failed to commit container to image: %s, %v", out, err) + c.Fatalf("failed to commit container to image: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("failed to inspect image: %s, %v", out, err) + c.Fatalf("failed to inspect image: %s, %v", out, err) } - - deleteContainer(cleanedContainerID) - deleteImages(cleanedImageID) - - logDone("commit - echo foo and commit the image with --pause=false") } //test commit a paused container should not unpause it after commit -func TestCommitPausedContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { defer unpauseAllContainers() cmd := 
exec.Command(dockerBinary, "run", "-i", "-d", "busybox") out, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cleanedContainerID := strings.TrimSpace(out) cmd = exec.Command(dockerBinary, "pause", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to pause container: %v, output: %q", err, out) + c.Fatalf("failed to pause container: %v, output: %q", err, out) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID) out, _, err = runCommandWithOutput(commitCmd) if err != nil { - t.Fatalf("failed to commit container to image: %s, %v", out, err) + c.Fatalf("failed to commit container to image: %s, %v", out, err) } - cleanedImageID := strings.TrimSpace(out) - defer deleteImages(cleanedImageID) cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Paused}}", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to inspect container: %v, output: %q", err, out) + c.Fatalf("failed to inspect container: %v, output: %q", err, out) } if !strings.Contains(out, "true") { - t.Fatalf("commit should not unpause a paused container") + c.Fatalf("commit should not unpause a paused container") } - logDone("commit - commit a paused container will not unpause it") } -func TestCommitNewFile(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCommitNewFile(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "commiter") imageID, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } imageID = strings.Trim(imageID, "\r\n") - defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", 
imageID, "cat", "/foo") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "koye" { - t.Fatalf("expected output koye received %q", actual) + c.Fatalf("expected output koye received %q", actual) } - logDone("commit - commit file and read") } -func TestCommitHardlink(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCommitHardlink(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") firstOuput, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } chunks := strings.Split(strings.TrimSpace(firstOuput), " ") @@ -158,21 +141,20 @@ func TestCommitHardlink(t *testing.T) { } } if !found { - t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) + c.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) } cmd = exec.Command(dockerBinary, "commit", "hardlinks", "hardlinks") imageID, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(imageID, err) + c.Fatal(imageID, err) } imageID = strings.Trim(imageID, "\r\n") - defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") secondOuput, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } chunks = strings.Split(strings.TrimSpace(secondOuput), " ") @@ -185,68 +167,60 @@ func TestCommitHardlink(t *testing.T) { } } if !found { - t.Fatalf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:]) + c.Fatalf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:]) } - logDone("commit - commit hardlinks") } -func TestCommitTTY(t *testing.T) { - defer deleteImages("ttytest") - defer deleteAllContainers() +func (s *DockerSuite) TestCommitTTY(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-t", "--name", "tty", "busybox", "/bin/ls") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "tty", "ttytest") imageID, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } imageID = strings.Trim(imageID, "\r\n") cmd = exec.Command(dockerBinary, "run", "ttytest", "/bin/ls") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("commit - commit tty") } -func TestCommitWithHostBindMount(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", "bind-commit", "bindtest") imageID, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(imageID, err) + c.Fatal(imageID, err) } imageID = strings.Trim(imageID, "\r\n") - defer deleteImages(imageID) cmd = exec.Command(dockerBinary, "run", "bindtest", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("commit - commit bind mounted file") } -func TestCommitChange(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCommitChange(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "test", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "commit", @@ -257,25 +231,70 @@ func TestCommitChange(t *testing.T) { "test", "test-commit") imageId, _, err := runCommandWithOutput(cmd) if err != nil { - 
t.Fatal(imageId, err) + c.Fatal(imageId, err) } imageId = strings.Trim(imageId, "\r\n") - defer deleteImages(imageId) expected := map[string]string{ - "Config.ExposedPorts": "map[8080/tcp:map[]]", + "Config.ExposedPorts": "map[8080/tcp:{}]", "Config.Env": "[DEBUG=true test=1 PATH=/foo]", } for conf, value := range expected { res, err := inspectField(imageId, conf) if err != nil { - t.Errorf("failed to get value %s, error: %s", conf, err) + c.Errorf("failed to get value %s, error: %s", conf, err) } if res != value { - t.Errorf("%s('%s'), expected %s", conf, res, value) + c.Errorf("%s('%s'), expected %s", conf, res, value) } } - logDone("commit - commit --change") +} + +// TODO: commit --run is deprecated, remove this once --run is removed +func (s *DockerSuite) TestCommitMergeConfigRun(c *check.C) { + name := "commit-test" + out, _ := dockerCmd(c, "run", "-d", "-e=FOO=bar", "busybox", "/bin/sh", "-c", "echo testing > /tmp/foo") + id := strings.TrimSpace(out) + + dockerCmd(c, "commit", `--run={"Cmd": ["cat", "/tmp/foo"]}`, id, "commit-test") + + out, _ = dockerCmd(c, "run", "--name", name, "commit-test") + if strings.TrimSpace(out) != "testing" { + c.Fatal("run config in committed container was not merged") + } + + type cfg struct { + Env []string + Cmd []string + } + config1 := cfg{} + if err := inspectFieldAndMarshall(id, "Config", &config1); err != nil { + c.Fatal(err) + } + config2 := cfg{} + if err := inspectFieldAndMarshall(name, "Config", &config2); err != nil { + c.Fatal(err) + } + + // Env has at least PATH loaded as well here, so let's just grab the FOO one + var env1, env2 string + for _, e := range config1.Env { + if strings.HasPrefix(e, "FOO") { + env1 = e + break + } + } + for _, e := range config2.Env { + if strings.HasPrefix(e, "FOO") { + env2 = e + break + } + } + + if len(config1.Env) != len(config2.Env) || env1 != env2 && env2 != "" { + c.Fatalf("expected envs to match: %v - %v", config1.Env, config2.Env) + } + } diff --git 
a/integration-cli/docker_cli_config_test.go b/integration-cli/docker_cli_config_test.go new file mode 100644 index 0000000000000..5ccd7af10e0cb --- /dev/null +++ b/integration-cli/docker_cli_config_test.go @@ -0,0 +1,56 @@ +package main + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + + "github.com/docker/docker/pkg/homedir" + "github.com/go-check/check" +) + +func (s *DockerSuite) TestConfigHttpHeader(c *check.C) { + testRequires(c, UnixCli) // Can't set/unset HOME on windows right now + // We either need a level of Go that supports Unsetenv (for cases + // when HOME/USERPROFILE isn't set), or we need to be able to use + // os/user but user.Current() only works if we aren't statically compiling + + var headers map[string][]string + + server := httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + headers = r.Header + })) + defer server.Close() + + homeKey := homedir.Key() + homeVal := homedir.Get() + tmpDir, _ := ioutil.TempDir("", "fake-home") + defer os.RemoveAll(tmpDir) + + dotDocker := filepath.Join(tmpDir, ".docker") + os.Mkdir(dotDocker, 0600) + tmpCfg := filepath.Join(dotDocker, "config.json") + + defer func() { os.Setenv(homeKey, homeVal) }() + os.Setenv(homeKey, tmpDir) + + data := `{ + "HttpHeaders": { "MyHeader": "MyValue" } + }` + + err := ioutil.WriteFile(tmpCfg, []byte(data), 0600) + if err != nil { + c.Fatalf("Err creating file(%s): %v", tmpCfg, err) + } + + cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") + out, _, _ := runCommandWithOutput(cmd) + + if headers["Myheader"] == nil || headers["Myheader"][0] != "MyValue" { + c.Fatalf("Missing/bad header: %q\nout:%v", headers, out) + } +} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go index 37e4659e917ac..26e778e4f2b80 100644 --- a/integration-cli/docker_cli_cp_test.go +++ b/integration-cli/docker_cli_cp_test.go @@ -9,7 +9,8 @@ import ( "path" "path/filepath" 
"strings" - "testing" + + "github.com/go-check/check" ) const ( @@ -24,27 +25,26 @@ const ( // Test for #5656 // Check that garbage paths don't escape the container's rootfs -func TestCpGarbagePath(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpGarbagePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { - t.Fatal(err) + c.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) @@ -53,7 +53,7 @@ func TestCpGarbagePath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) @@ -61,52 +61,47 @@ func TestCpGarbagePath(t *testing.T) { path := path.Join("../../../../../../../../../../../../", cpFullPath) - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) - if err != nil { - t.Fatalf("couldn't copy from garbage path: %s:%s %s", cleanedContainerID, path, err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := 
ioutil.ReadAll(file) if err != nil { - t.Fatal(err) + c.Fatal(err) } if string(test) == cpHostContents { - t.Errorf("output matched host file -- garbage path can escape container rootfs") + c.Errorf("output matched host file -- garbage path can escape container rootfs") } if string(test) != cpContainerContents { - t.Errorf("output doesn't match the input for garbage path") + c.Errorf("output doesn't match the input for garbage path") } - logDone("cp - garbage paths relative to container's rootfs") } // Check that relative paths are relative to the container's rootfs -func TestCpRelativePath(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpRelativePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { - t.Fatal(err) + c.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) @@ -116,7 +111,7 @@ func TestCpRelativePath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) @@ -128,55 +123,50 @@ func 
TestCpRelativePath(t *testing.T) { // get this unix-path manipulation on windows with filepath. relPath = cpFullPath[1:] } else { - t.Fatalf("path %s was assumed to be an absolute path", cpFullPath) + c.Fatalf("path %s was assumed to be an absolute path", cpFullPath) } - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+relPath, tmpdir) - if err != nil { - t.Fatalf("couldn't copy from relative path: %s:%s %s", cleanedContainerID, relPath, err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":"+relPath, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { - t.Fatal(err) + c.Fatal(err) } if string(test) == cpHostContents { - t.Errorf("output matched host file -- relative path can escape container rootfs") + c.Errorf("output matched host file -- relative path can escape container rootfs") } if string(test) != cpContainerContents { - t.Errorf("output doesn't match the input for relative path") + c.Errorf("output doesn't match the input for relative path") } - logDone("cp - relative paths relative to container's rootfs") } // Check that absolute paths are relative to the container's rootfs -func TestCpAbsolutePath(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, 
"wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { - t.Fatal(err) + c.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) @@ -186,7 +176,7 @@ func TestCpAbsolutePath(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) @@ -194,53 +184,48 @@ func TestCpAbsolutePath(t *testing.T) { path := cpFullPath - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) - if err != nil { - t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { - t.Fatal(err) + c.Fatal(err) } if string(test) == cpHostContents { - t.Errorf("output matched host file -- absolute path can escape container rootfs") + c.Errorf("output matched host file -- absolute path can escape container rootfs") } if string(test) != cpContainerContents { - t.Errorf("output doesn't match the input for absolute path") + c.Errorf("output doesn't match the input for absolute path") } - logDone("cp - absolute paths relative to container's rootfs") } // Test for #5619 // Check that absolute symlinks are still relative to the container's rootfs -func TestCpAbsoluteSymlink(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", 
"/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { - t.Fatal(err) + c.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) @@ -250,7 +235,7 @@ func TestCpAbsoluteSymlink(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) @@ -258,53 +243,48 @@ func TestCpAbsoluteSymlink(t *testing.T) { path := path.Join("/", "container_path") - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) - if err != nil { - t.Fatalf("couldn't copy from absolute path: %s:%s %s", cleanedContainerID, path, err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) file, _ := os.Open(tmpname) defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { - t.Fatal(err) + c.Fatal(err) } if string(test) == cpHostContents { - t.Errorf("output matched host file -- absolute symlink can escape container rootfs") + c.Errorf("output matched host file -- absolute symlink can escape container rootfs") } if string(test) != cpContainerContents { - t.Errorf("output doesn't match the input for absolute symlink") + c.Errorf("output doesn't match the input for absolute symlink") } - logDone("cp - absolute symlink relative to container's rootfs") } // Test for #5619 // 
Check that symlinks which are part of the resource path are still relative to the container's rootfs -func TestCpSymlinkComponent(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } if err := os.MkdirAll(cpTestPath, os.ModeDir); err != nil { - t.Fatal(err) + c.Fatal(err) } hostFile, err := os.Create(cpFullPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer hostFile.Close() defer os.RemoveAll(cpTestPathParent) @@ -314,7 +294,7 @@ func TestCpSymlinkComponent(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpname := filepath.Join(tmpdir, cpTestName) @@ -322,301 +302,297 @@ func TestCpSymlinkComponent(t *testing.T) { path := path.Join("/", "container_path", cpTestName) - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":"+path, tmpdir) - if err != nil { - t.Fatalf("couldn't copy from symlink path component: %s:%s %s", cleanedContainerID, path, err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":"+path, tmpdir) file, _ := os.Open(tmpname) 
defer file.Close() test, err := ioutil.ReadAll(file) if err != nil { - t.Fatal(err) + c.Fatal(err) } if string(test) == cpHostContents { - t.Errorf("output matched host file -- symlink path component can escape container rootfs") + c.Errorf("output matched host file -- symlink path component can escape container rootfs") } if string(test) != cpContainerContents { - t.Errorf("output doesn't match the input for symlink path component") + c.Errorf("output doesn't match the input for symlink path component") } - logDone("cp - symlink path components relative to container's rootfs") } // Check that cp with unprivileged user doesn't return any error -func TestCpUnprivilegedUser(t *testing.T) { - testRequires(t, UnixCli) // uses chmod/su: not available on windows +func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { + testRequires(c, UnixCli) // uses chmod/su: not available on windows - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpdir) if err = os.Chmod(tmpdir, 0777); err != nil { - t.Fatal(err) + c.Fatal(err) } path := cpTestName _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+cleanedContainerID+":"+path+" "+tmpdir)) if 
err != nil { - t.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) + c.Fatalf("couldn't copy with unprivileged user: %s:%s %s", cleanedContainerID, path, err) } - logDone("cp - unprivileged user") } -func TestCpSpecialFiles(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { + testRequires(c, SameHostDaemon) outDir, err := ioutil.TempDir("", "cp-test-special-files") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(outDir) - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } // Copy actual /etc/resolv.conf - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/etc/resolv.conf", outDir) - if err != nil { - t.Fatalf("couldn't copy from container: %s:%s %v", cleanedContainerID, "/etc/resolv.conf", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/resolv.conf", outDir) expected, err := ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/resolv.conf") actual, err := ioutil.ReadFile(outDir + "/resolv.conf") if !bytes.Equal(actual, expected) { - t.Fatalf("Expected copied file to be duplicate of the container resolvconf") + c.Fatalf("Expected copied file to be duplicate of the container resolvconf") } // Copy actual /etc/hosts - _, _, err = dockerCmd(t, "cp", 
cleanedContainerID+":/etc/hosts", outDir) - if err != nil { - t.Fatalf("couldn't copy from container: %s:%s %v", cleanedContainerID, "/etc/hosts", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/hosts", outDir) expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hosts") actual, err = ioutil.ReadFile(outDir + "/hosts") if !bytes.Equal(actual, expected) { - t.Fatalf("Expected copied file to be duplicate of the container hosts") + c.Fatalf("Expected copied file to be duplicate of the container hosts") } // Copy actual /etc/resolv.conf - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/etc/hostname", outDir) - if err != nil { - t.Fatalf("couldn't copy from container: %s:%s %v", cleanedContainerID, "/etc/hostname", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/etc/hostname", outDir) expected, err = ioutil.ReadFile("/var/lib/docker/containers/" + cleanedContainerID + "/hostname") actual, err = ioutil.ReadFile(outDir + "/hostname") if !bytes.Equal(actual, expected) { - t.Fatalf("Expected copied file to be duplicate of the container resolvconf") + c.Fatalf("Expected copied file to be duplicate of the container resolvconf") } - logDone("cp - special files (resolv.conf, hosts, hostname)") } -func TestCpVolumePath(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestCpVolumePath(c *check.C) { + testRequires(c, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(outDir) _, err = os.Create(tmpDir + "/test") if err != nil { - t.Fatal(err) + c.Fatal(err) } - out, exitCode, err := dockerCmd(t, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") - if err != nil || exitCode != 0 { - t.Fatal("failed to 
create a container", out, err) + out, exitCode := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer dockerCmd(t, "rm", "-fv", cleanedContainerID) + defer dockerCmd(c, "rm", "-fv", cleanedContainerID) - out, _, err = dockerCmd(t, "wait", cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } // Copy actual volume path - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo", outDir) - if err != nil { - t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/foo", outDir) + stat, err := os.Stat(outDir + "/foo") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !stat.IsDir() { - t.Fatal("expected copied content to be dir") + c.Fatal("expected copied content to be dir") } stat, err = os.Stat(outDir + "/foo/bar") if err != nil { - t.Fatal(err) + c.Fatal(err) } if stat.IsDir() { - t.Fatal("Expected file `bar` to be a file") + c.Fatal("Expected file `bar` to be a file") } // Copy file nested in volume - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/foo/bar", outDir) - if err != nil { - t.Fatalf("couldn't copy from volume path: %s:%s %v", cleanedContainerID, "/foo", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/foo/bar", outDir) + stat, err = os.Stat(outDir + "/bar") if err != nil { - t.Fatal(err) + c.Fatal(err) } if stat.IsDir() { - t.Fatal("Expected file `bar` to be a file") + c.Fatal("Expected file `bar` to be a file") } // Copy Bind-mounted dir - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz", outDir) - if err != nil { - t.Fatalf("couldn't copy from 
bind-mounted volume path: %s:%s %v", cleanedContainerID, "/baz", err) - } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/baz", outDir) stat, err = os.Stat(outDir + "/baz") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !stat.IsDir() { - t.Fatal("Expected `baz` to be a dir") + c.Fatal("Expected `baz` to be a dir") } // Copy file nested in bind-mounted dir - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/baz/test", outDir) + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/baz/test", outDir) fb, err := ioutil.ReadFile(outDir + "/baz/test") if err != nil { - t.Fatal(err) + c.Fatal(err) } fb2, err := ioutil.ReadFile(tmpDir + "/test") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(fb, fb2) { - t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + c.Fatalf("Expected copied file to be duplicate of bind-mounted file") } // Copy bind-mounted file - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", outDir) + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/test", outDir) fb, err = ioutil.ReadFile(outDir + "/test") if err != nil { - t.Fatal(err) + c.Fatal(err) } fb2, err = ioutil.ReadFile(tmpDir + "/test") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(fb, fb2) { - t.Fatalf("Expected copied file to be duplicate of bind-mounted file") + c.Fatalf("Expected copied file to be duplicate of bind-mounted file") } - logDone("cp - volume path") } -func TestCpToDot(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") - if err != nil || exitCode != 0 { - t.Fatal("failed to create a container", out, err) +func (s *DockerSuite) TestCpToDot(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + if exitCode != 0 { + c.Fatal("failed to create a container", out) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) - out, _, err = dockerCmd(t, "wait", 
cleanedContainerID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) } tmpdir, err := ioutil.TempDir("", "docker-integration") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpdir) cwd, err := os.Getwd() if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.Chdir(cwd) if err := os.Chdir(tmpdir); err != nil { - t.Fatal(err) - } - _, _, err = dockerCmd(t, "cp", cleanedContainerID+":/test", ".") - if err != nil { - t.Fatalf("couldn't docker cp to \".\" path: %s", err) + c.Fatal(err) } + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/test", ".") content, err := ioutil.ReadFile("./test") if string(content) != "lololol\n" { - t.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") + c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") } - logDone("cp - to dot path") } -func TestCpToStdout(t *testing.T) { - out, exitCode, err := dockerCmd(t, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") - if err != nil || exitCode != 0 { - t.Fatalf("failed to create a container:%s\n%s", out, err) +func (s *DockerSuite) TestCpToStdout(c *check.C) { + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") + if exitCode != 0 { + c.Fatalf("failed to create a container:%s\n", out) } cID := strings.TrimSpace(out) - defer deleteContainer(cID) - out, _, err = dockerCmd(t, "wait", cID) - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatalf("failed to set up container:%s\n%s", out, err) + out, _ = dockerCmd(c, "wait", cID) + if strings.TrimSpace(out) != "0" { + c.Fatalf("failed to set up container:%s\n", out) } - out, _, err = runCommandPipelineWithOutput( + out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "cp", cID+":/test", "-"), exec.Command("tar", 
"-vtf", "-")) + if err != nil { - t.Fatalf("Failed to run commands: %s", err) + c.Fatalf("Failed to run commands: %s", err) } if !strings.Contains(out, "test") || !strings.Contains(out, "-rw") { - t.Fatalf("Missing file from tar TOC:\n%s", out) + c.Fatalf("Missing file from tar TOC:\n%s", out) + } +} + +func (s *DockerSuite) TestCpNameHasColon(c *check.C) { + testRequires(c, SameHostDaemon) + + out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") + if exitCode != 0 { + c.Fatal("failed to create a container", out) + } + + cleanedContainerID := strings.TrimSpace(out) + + out, _ = dockerCmd(c, "wait", cleanedContainerID) + if strings.TrimSpace(out) != "0" { + c.Fatal("failed to set up container", out) + } + + tmpdir, err := ioutil.TempDir("", "docker-integration") + if err != nil { + c.Fatal(err) + } + defer os.RemoveAll(tmpdir) + _, _ = dockerCmd(c, "cp", cleanedContainerID+":/te:s:t", tmpdir) + content, err := ioutil.ReadFile(tmpdir + "/te:s:t") + if string(content) != "lololol\n" { + c.Fatalf("Wrong content in copied file %q, should be %q", content, "lololol\n") } - logDone("cp - to stdout") } diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go index 3a3c2f07df249..646a8eafe023b 100644 --- a/integration-cli/docker_cli_create_test.go +++ b/integration-cli/docker_cli_create_test.go @@ -6,20 +6,18 @@ import ( "os/exec" "reflect" "strings" - "testing" "time" "github.com/docker/docker/nat" + "github.com/go-check/check" ) // Make sure we can create a simple container with some args -func TestCreateArgs(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestCreateArgs(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "busybox", "command", "arg1", "arg2", "arg with space") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -27,7 +25,7 @@ func 
TestCreateArgs(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { @@ -38,40 +36,38 @@ func TestCreateArgs(t *testing.T) { Image string }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { - t.Fatalf("Error inspecting the container: %s", err) + c.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { - t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) } - c := containers[0] - if c.Path != "command" { - t.Fatalf("Unexpected container path. Expected command, received: %s", c.Path) + cont := containers[0] + if cont.Path != "command" { + c.Fatalf("Unexpected container path. Expected command, received: %s", cont.Path) } b := false expected := []string{"arg1", "arg2", "arg with space"} for i, arg := range expected { - if arg != c.Args[i] { + if arg != cont.Args[i] { b = true break } } - if len(c.Args) != len(expected) || b { - t.Fatalf("Unexpected args. Expected %v, received: %v", expected, c.Args) + if len(cont.Args) != len(expected) || b { + c.Fatalf("Unexpected args. 
Expected %v, received: %v", expected, cont.Args) } - logDone("create - args") } // Make sure we can set hostconfig options too -func TestCreateHostConfig(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCreateHostConfig(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "-P", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -79,7 +75,7 @@ func TestCreateHostConfig(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { @@ -88,31 +84,29 @@ func TestCreateHostConfig(t *testing.T) { } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { - t.Fatalf("Error inspecting the container: %s", err) + c.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { - t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + c.Fatalf("Unexpected container count. 
Expected 0, received: %d", len(containers)) } - c := containers[0] - if c.HostConfig == nil { - t.Fatalf("Expected HostConfig, got none") + cont := containers[0] + if cont.HostConfig == nil { + c.Fatalf("Expected HostConfig, got none") } - if !c.HostConfig.PublishAllPorts { - t.Fatalf("Expected PublishAllPorts, got false") + if !cont.HostConfig.PublishAllPorts { + c.Fatalf("Expected PublishAllPorts, got false") } - logDone("create - hostconfig") } -func TestCreateWithPortRange(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -120,7 +114,7 @@ func TestCreateWithPortRange(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { @@ -129,39 +123,37 @@ func TestCreateWithPortRange(t *testing.T) { } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { - t.Fatalf("Error inspecting the container: %s", err) + c.Fatalf("Error inspecting the container: %s", err) } if len(containers) != 1 { - t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + c.Fatalf("Unexpected container count. 
Expected 0, received: %d", len(containers)) } - c := containers[0] - if c.HostConfig == nil { - t.Fatalf("Expected HostConfig, got none") + cont := containers[0] + if cont.HostConfig == nil { + c.Fatalf("Expected HostConfig, got none") } - if len(c.HostConfig.PortBindings) != 4 { - t.Fatalf("Expected 4 ports bindings, got %d", len(c.HostConfig.PortBindings)) + if len(cont.HostConfig.PortBindings) != 4 { + c.Fatalf("Expected 4 ports bindings, got %d", len(cont.HostConfig.PortBindings)) } - for k, v := range c.HostConfig.PortBindings { + for k, v := range cont.HostConfig.PortBindings { if len(v) != 1 { - t.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) + c.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) } if k.Port() != v[0].HostPort { - t.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) + c.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) } } - logDone("create - port range") } -func TestCreateWithiLargePortRange(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCreateWithiLargePortRange(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -169,7 +161,7 @@ func TestCreateWithiLargePortRange(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } containers := []struct { @@ -178,40 +170,38 @@ func TestCreateWithiLargePortRange(t *testing.T) { } }{} if err := json.Unmarshal([]byte(out), &containers); err != nil { - t.Fatalf("Error inspecting the container: %s", err) + c.Fatalf("Error inspecting 
the container: %s", err) } if len(containers) != 1 { - t.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) + c.Fatalf("Unexpected container count. Expected 0, received: %d", len(containers)) } - c := containers[0] - if c.HostConfig == nil { - t.Fatalf("Expected HostConfig, got none") + cont := containers[0] + if cont.HostConfig == nil { + c.Fatalf("Expected HostConfig, got none") } - if len(c.HostConfig.PortBindings) != 65535 { - t.Fatalf("Expected 65535 ports bindings, got %d", len(c.HostConfig.PortBindings)) + if len(cont.HostConfig.PortBindings) != 65535 { + c.Fatalf("Expected 65535 ports bindings, got %d", len(cont.HostConfig.PortBindings)) } - for k, v := range c.HostConfig.PortBindings { + for k, v := range cont.HostConfig.PortBindings { if len(v) != 1 { - t.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) + c.Fatalf("Expected 1 ports binding, for the port %s but found %s", k, v) } if k.Port() != v[0].HostPort { - t.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) + c.Fatalf("Expected host port %d to match published port %d", k.Port(), v[0].HostPort) } } - logDone("create - large port range") } // "test123" should be printed by docker create + start -func TestCreateEchoStdout(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -219,89 +209,84 @@ func TestCreateEchoStdout(t *testing.T) { runCmd = exec.Command(dockerBinary, "start", "-ai", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "test123\n" { - t.Errorf("container should've printed 'test123', got %q", out) + c.Errorf("container should've printed 
'test123', got %q", out) } - logDone("create - echo test123") } -func TestCreateVolumesCreated(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { + testRequires(c, SameHostDaemon) name := "test_create_volume" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-v", "/foo", "busybox")); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } dir, err := inspectFieldMap(name, "Volumes", "/foo") if err != nil { - t.Fatalf("Error getting volume host path: %q", err) + c.Fatalf("Error getting volume host path: %q", err) } if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { - t.Fatalf("Volume was not created") + c.Fatalf("Volume was not created") } if err != nil { - t.Fatalf("Error statting volume host path: %q", err) + c.Fatalf("Error statting volume host path: %q", err) } - logDone("create - volumes are created") } -func TestCreateLabels(t *testing.T) { +func (s *DockerSuite) TestCreateLabels(c *check.C) { name := "test_create_labels" expected := map[string]string{"k1": "v1", "k2": "v2"} if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox")); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } actual := make(map[string]string) err := inspectFieldAndMarshall(name, "Config.Labels", &actual) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !reflect.DeepEqual(expected, actual) { - t.Fatalf("Expected %s got %s", expected, actual) + c.Fatalf("Expected %s got %s", expected, actual) } - - deleteAllContainers() - - logDone("create - labels") } -func TestCreateLabelFromImage(t *testing.T) { +func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { imageName := "testcreatebuildlabel" - defer deleteImages(imageName) _, err := buildImage(imageName, `FROM busybox LABEL k1=v1 k2=v2`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } name := 
"test_create_labels_from_image" expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName)); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } actual := make(map[string]string) err = inspectFieldAndMarshall(name, "Config.Labels", &actual) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !reflect.DeepEqual(expected, actual) { - t.Fatalf("Expected %s got %s", expected, actual) + c.Fatalf("Expected %s got %s", expected, actual) } +} - deleteAllContainers() - - logDone("create - labels from image") +func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { + out, _ := dockerCmd(c, "run", "-h", "web.0", "busybox", "hostname") + if strings.TrimSpace(out) != "web.0" { + c.Fatalf("hostname not set, expected `web.0`, got: %s", out) + } } diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go index 3a10fb004cb21..e099995ad3aed 100644 --- a/integration-cli/docker_cli_daemon_test.go +++ b/integration-cli/docker_cli_daemon_test.go @@ -10,144 +10,119 @@ import ( "os/exec" "path/filepath" "strings" - "testing" "time" "github.com/docker/libtrust" + "github.com/go-check/check" ) -func TestDaemonRestartWithRunningContainersPorts(t *testing.T) { - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatalf("Could not start daemon with busybox: %v", err) +func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) } - defer d.Stop() - if out, err := d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { - t.Fatalf("Could not run top1: err=%v\n%s", err, out) + if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err 
!= nil { + c.Fatalf("Could not run top1: err=%v\n%s", err, out) } // --restart=no by default - if out, err := d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { - t.Fatalf("Could not run top2: err=%v\n%s", err, out) + if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top2: err=%v\n%s", err, out) } testRun := func(m map[string]bool, prefix string) { var format string - for c, shouldRun := range m { - out, err := d.Cmd("ps") + for cont, shouldRun := range m { + out, err := s.d.Cmd("ps") if err != nil { - t.Fatalf("Could not run ps: err=%v\n%q", err, out) + c.Fatalf("Could not run ps: err=%v\n%q", err, out) } if shouldRun { format = "%scontainer %q is not running" } else { format = "%scontainer %q is running" } - if shouldRun != strings.Contains(out, c) { - t.Fatalf(format, prefix, c) + if shouldRun != strings.Contains(out, cont) { + c.Fatalf(format, prefix, cont) } } } testRun(map[string]bool{"top1": true, "top2": true}, "") - if err := d.Restart(); err != nil { - t.Fatalf("Could not restart daemon: %v", err) + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) } - testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") - - logDone("daemon - running containers on daemon restart") } -func TestDaemonRestartWithVolumesRefs(t *testing.T) { - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() - if out, err := d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("run", "-d", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { + c.Fatal(err, out) } - if err := d.Restart(); err != nil { - t.Fatal(err) + if err := 
s.d.Restart(); err != nil { + c.Fatal(err) } - if _, err := d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { - t.Fatal(err) + if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { + c.Fatal(err) } - if out, err := d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { + c.Fatal(err, out) } - v, err := d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1") + v, err := s.d.Cmd("inspect", "--format", "{{ json .Volumes }}", "volrestarttest1") if err != nil { - t.Fatal(err) + c.Fatal(err) } volumes := make(map[string]string) json.Unmarshal([]byte(v), &volumes) if _, err := os.Stat(volumes["/foo"]); err != nil { - t.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err) + c.Fatalf("Expected volume to exist: %s - %s", volumes["/foo"], err) } - - logDone("daemon - volume refs are restored") } -func TestDaemonStartIptablesFalse(t *testing.T) { - d := NewDaemon(t) - if err := d.Start("--iptables=false"); err != nil { - t.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) +func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { + if err := s.d.Start("--iptables=false"); err != nil { + c.Fatalf("we should have been able to start the daemon with passing iptables=false: %v", err) } - d.Stop() - - logDone("daemon - started daemon with iptables=false") } // Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and // no longer has an IP associated, we should gracefully handle that case and associate // an IP with it rather than fail daemon start -func TestDaemonStartBridgeWithoutIPAssociation(t *testing.T) { - d := NewDaemon(t) +func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { // rather than depending on brctl 
commands to verify docker0 is created and up // let's start the daemon and stop it, and then make a modification to run the // actual test - if err := d.Start(); err != nil { - t.Fatalf("Could not start daemon: %v", err) + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) } - if err := d.Stop(); err != nil { - t.Fatalf("Could not stop daemon: %v", err) + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) } // now we will remove the ip from docker0 and then try starting the daemon ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) if err != nil { - t.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) + c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) } - if err := d.Start(); err != nil { + if err := s.d.Start(); err != nil { warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" - t.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) + c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) } - - // cleanup - stop the daemon if test passed - if err := d.Stop(); err != nil { - t.Fatalf("Could not stop daemon: %v", err) - } - - logDone("daemon - successful daemon start when bridge has no IP association") } -func TestDaemonIptablesClean(t *testing.T) { - defer deleteAllContainers() - - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatalf("Could not start daemon with busybox: %v", err) +func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) } - defer d.Stop() - if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { - t.Fatalf("Could not run top: 
%s, %v", out, err) + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running @@ -155,42 +130,36 @@ func TestDaemonIptablesClean(t *testing.T) { ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { - t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { - t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } - if err := d.Stop(); err != nil { - t.Fatalf("Could not stop daemon: %v", err) + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { - t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if strings.Contains(out, ipTablesSearchString) { - t.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) + c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) } - - logDone("daemon - run,iptables - iptables rules cleaned after daemon restart") } -func TestDaemonIptablesCreate(t *testing.T) { - defer deleteAllContainers() - - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatalf("Could not start daemon with busybox: %v", err) +func (s *DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) } - defer d.Stop() - if out, err := d.Cmd("run", "-d", "--name", "top", 
"--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { - t.Fatalf("Could not run top: %s, %v", out, err) + if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: %s, %v", out, err) } // get output from iptables with container running @@ -198,101 +167,94 @@ func TestDaemonIptablesCreate(t *testing.T) { ipTablesCmd := exec.Command("iptables", "-nvL") out, _, err := runCommandWithOutput(ipTablesCmd) if err != nil { - t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { - t.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) + c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) } - if err := d.Restart(); err != nil { - t.Fatalf("Could not restart daemon: %v", err) + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) } // make sure the container is not running - runningOut, err := d.Cmd("inspect", "--format='{{.State.Running}}'", "top") + runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top") if err != nil { - t.Fatalf("Could not inspect on container: %s, %v", out, err) + c.Fatalf("Could not inspect on container: %s, %v", out, err) } if strings.TrimSpace(runningOut) != "true" { - t.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) + c.Fatalf("Container should have been restarted after daemon restart. 
Status running should have been true but was: %q", strings.TrimSpace(runningOut)) } // get output from iptables after restart ipTablesCmd = exec.Command("iptables", "-nvL") out, _, err = runCommandWithOutput(ipTablesCmd) if err != nil { - t.Fatalf("Could not run iptables -nvL: %s, %v", out, err) + c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) } if !strings.Contains(out, ipTablesSearchString) { - t.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) + c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) } - - logDone("daemon - run,iptables - iptables rules for always restarted container created after daemon restart") } -func TestDaemonLoggingLevel(t *testing.T) { - d := NewDaemon(t) - - if err := d.Start("--log-level=bogus"); err == nil { - t.Fatal("Daemon should not have been able to start") - } +func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { + c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) +} - d = NewDaemon(t) - if err := d.Start("--log-level=debug"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { + if err := s.d.Start("--log-level=debug"); err != nil { + c.Fatal(err) } - d.Stop() - content, _ := ioutil.ReadFile(d.logFile.Name()) + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { - t.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) + c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) } +} - d = NewDaemon(t) - if err := d.Start("--log-level=fatal"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { + // we creating new daemons to create new logFile + if err := s.d.Start("--log-level=fatal"); err != nil { + c.Fatal(err) } - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + 
content, _ := ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), `level=debug`) { - t.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) + c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) } +} - d = NewDaemon(t) - if err := d.Start("-D"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { + if err := s.d.Start("-D"); err != nil { + c.Fatal(err) } - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { - t.Fatalf(`Missing level="debug" in log file using -D:\n%s`, string(content)) + c.Fatalf(`Missing level="debug" in log file using -D:\n%s`, string(content)) } +} - d = NewDaemon(t) - if err := d.Start("--debug"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { + if err := s.d.Start("--debug"); err != nil { + c.Fatal(err) } - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { - t.Fatalf(`Missing level="debug" in log file using --debug:\n%s`, string(content)) + c.Fatalf(`Missing level="debug" in log file using --debug:\n%s`, string(content)) } +} - d = NewDaemon(t) - if err := d.Start("--debug", "--log-level=fatal"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { + if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { + c.Fatal(err) } - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), `level=debug`) { - t.Fatalf(`Missing level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) + c.Fatalf(`Missing level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) } - - 
logDone("daemon - Logging Level") } -func TestDaemonAllocatesListeningPort(t *testing.T) { +func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { listeningPorts := [][]string{ {"0.0.0.0", "0.0.0.0", "5678"}, {"127.0.0.1", "127.0.0.1", "1234"}, @@ -304,320 +266,283 @@ func TestDaemonAllocatesListeningPort(t *testing.T) { cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) } - d := NewDaemon(t) - if err := d.StartWithBusybox(cmdArgs...); err != nil { - t.Fatalf("Could not start daemon with busybox: %v", err) + if err := s.d.StartWithBusybox(cmdArgs...); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) } - defer d.Stop() for _, hostDirective := range listeningPorts { - output, err := d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") + output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") if err == nil { - t.Fatalf("Container should not start, expected port already allocated error: %q", output) + c.Fatalf("Container should not start, expected port already allocated error: %q", output) } else if !strings.Contains(output, "port is already allocated") { - t.Fatalf("Expected port is already allocated error: %q", output) + c.Fatalf("Expected port is already allocated error: %q", output) } } - - logDone("daemon - daemon listening port is allocated") } // #9629 -func TestDaemonVolumesBindsRefs(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonVolumesBindsRefs(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() tmp, err := ioutil.TempDir(os.TempDir(), "") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmp) if err := ioutil.WriteFile(tmp+"/test", []byte("testing"), 0655); err != nil { - t.Fatal(err) + c.Fatal(err) } - if out, 
err := d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("create", "-v", tmp+":/foo", "--name=voltest", "busybox"); err != nil { + c.Fatal(err, out) } - if err := d.Restart(); err != nil { - t.Fatal(err) + if err := s.d.Restart(); err != nil { + c.Fatal(err) } - if out, err := d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("run", "--volumes-from=voltest", "--name=consumer", "busybox", "/bin/sh", "-c", "[ -f /foo/test ]"); err != nil { + c.Fatal(err, out) } - - logDone("daemon - bind refs in data-containers survive daemon restart") } -func TestDaemonKeyGeneration(t *testing.T) { +func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") - d := NewDaemon(t) - if err := d.Start(); err != nil { - t.Fatalf("Could not start daemon: %v", err) + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) } - d.Stop() + s.d.Stop() k, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { - t.Fatalf("Error opening key file") + c.Fatalf("Error opening key file") } kid := k.KeyID() // Test Key ID is a valid fingerprint (e.g. 
QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) if len(kid) != 59 { - t.Fatalf("Bad key ID: %s", kid) + c.Fatalf("Bad key ID: %s", kid) } - - logDone("daemon - key generation") } -func TestDaemonKeyMigration(t *testing.T) { +func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { // TODO: skip or update for Windows daemon os.Remove("/etc/docker/key.json") k1, err := libtrust.GenerateECP256PrivateKey() if err != nil { - t.Fatalf("Error generating private key: %s", err) + c.Fatalf("Error generating private key: %s", err) } if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { - t.Fatalf("Error creating .docker directory: %s", err) + c.Fatalf("Error creating .docker directory: %s", err) } if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { - t.Fatalf("Error saving private key: %s", err) + c.Fatalf("Error saving private key: %s", err) } - d := NewDaemon(t) - if err := d.Start(); err != nil { - t.Fatalf("Could not start daemon: %v", err) + if err := s.d.Start(); err != nil { + c.Fatalf("Could not start daemon: %v", err) } - d.Stop() + s.d.Stop() k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") if err != nil { - t.Fatalf("Error opening key file") + c.Fatalf("Error opening key file") } if k1.KeyID() != k2.KeyID() { - t.Fatalf("Key not migrated") + c.Fatalf("Key not migrated") } - - logDone("daemon - key migration") } // Simulate an older daemon (pre 1.3) coming up with volumes specified in containers // without corresponding volume json -func TestDaemonUpgradeWithVolumes(t *testing.T) { - d := NewDaemon(t) - +func (s *DockerDaemonSuite) TestDaemonUpgradeWithVolumes(c *check.C) { graphDir := filepath.Join(os.TempDir(), "docker-test") defer os.RemoveAll(graphDir) - if err := d.StartWithBusybox("-g", graphDir); err != nil { - t.Fatal(err) + if err := s.d.StartWithBusybox("-g", graphDir); err != nil { + c.Fatal(err) } - defer d.Stop() tmpDir := 
filepath.Join(os.TempDir(), "test") defer os.RemoveAll(tmpDir) - if out, err := d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("create", "-v", tmpDir+":/foo", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) } - if err := d.Stop(); err != nil { - t.Fatal(err) + if err := s.d.Stop(); err != nil { + c.Fatal(err) } // Remove this since we're expecting the daemon to re-create it too if err := os.RemoveAll(tmpDir); err != nil { - t.Fatal(err) + c.Fatal(err) } configDir := filepath.Join(graphDir, "volumes") if err := os.RemoveAll(configDir); err != nil { - t.Fatal(err) + c.Fatal(err) } - if err := d.Start("-g", graphDir); err != nil { - t.Fatal(err) + if err := s.d.Start("-g", graphDir); err != nil { + c.Fatal(err) } if _, err := os.Stat(tmpDir); os.IsNotExist(err) { - t.Fatalf("expected volume path %s to exist but it does not", tmpDir) + c.Fatalf("expected volume path %s to exist but it does not", tmpDir) } dir, err := ioutil.ReadDir(configDir) if err != nil { - t.Fatal(err) + c.Fatal(err) } if len(dir) == 0 { - t.Fatalf("expected volumes config dir to contain data for new volume") + c.Fatalf("expected volumes config dir to contain data for new volume") } // Now with just removing the volume config and not the volume data - if err := d.Stop(); err != nil { - t.Fatal(err) + if err := s.d.Stop(); err != nil { + c.Fatal(err) } if err := os.RemoveAll(configDir); err != nil { - t.Fatal(err) + c.Fatal(err) } - if err := d.Start("-g", graphDir); err != nil { - t.Fatal(err) + if err := s.d.Start("-g", graphDir); err != nil { + c.Fatal(err) } dir, err = ioutil.ReadDir(configDir) if err != nil { - t.Fatal(err) + c.Fatal(err) } if len(dir) == 0 { - t.Fatalf("expected volumes config dir to contain data for new volume") + c.Fatalf("expected volumes config dir to contain data for new volume") } - - logDone("daemon - volumes from old(pre 1.3) daemon work") } // GH#11320 - verify that the daemon 
exits on failure properly // Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means // to get a daemon init failure; no other tests for -b/--bip conflict are therefore required -func TestDaemonExitOnFailure(t *testing.T) { - d := NewDaemon(t) - defer d.Stop() - +func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { //attempt to start daemon with incorrect flags (we know -b and --bip conflict) - if err := d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { + if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { //verify we got the right error if !strings.Contains(err.Error(), "Daemon exited and never started") { - t.Fatalf("Expected daemon not to start, got %v", err) + c.Fatalf("Expected daemon not to start, got %v", err) } // look in the log and make sure we got the message that daemon is shutting down - runCmd := exec.Command("grep", "Shutting down daemon due to", d.LogfileName()) + runCmd := exec.Command("grep", "Error starting daemon", s.d.LogfileName()) if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf("Expected 'shutting down daemon due to error' message; but doesn't exist in log: %q, err: %v", out, err) + c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) } } else { //if we didn't get an error and the daemon is running, this is a failure - d.Stop() - t.Fatal("Conflicting options should cause the daemon to error out with a failure") + c.Fatal("Conflicting options should cause the daemon to error out with a failure") } - - logDone("daemon - verify no start on daemon init errors") } -func TestDaemonUlimitDefaults(t *testing.T) { - testRequires(t, NativeExecDriver) - d := NewDaemon(t) +func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { + testRequires(c, NativeExecDriver) - if err := d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err 
!= nil { - t.Fatal(err) + if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") + out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } outArr := strings.Split(out, "\n") if len(outArr) < 2 { - t.Fatalf("got unexpected output: %s", out) + c.Fatalf("got unexpected output: %s", out) } nofile := strings.TrimSpace(outArr[0]) nproc := strings.TrimSpace(outArr[1]) if nofile != "42" { - t.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) + c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) } if nproc != "2048" { - t.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) } // Now restart daemon with a new default - if err := d.Restart("--default-ulimit", "nofile=43"); err != nil { - t.Fatal(err) + if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { + c.Fatal(err) } - out, err = d.Cmd("start", "-a", "test") + out, err = s.d.Cmd("start", "-a", "test") if err != nil { - t.Fatal(err) + c.Fatal(err) } outArr = strings.Split(out, "\n") if len(outArr) < 2 { - t.Fatalf("got unexpected output: %s", out) + c.Fatalf("got unexpected output: %s", out) } nofile = strings.TrimSpace(outArr[0]) nproc = strings.TrimSpace(outArr[1]) if nofile != "43" { - t.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) + c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) } if nproc != "2048" { - t.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) + c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) } - - logDone("daemon - default ulimits are applied") } // #11315 -func 
TestDaemonRestartRenameContainer(t *testing.T) { - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() - if out, err := d.Cmd("run", "--name=test", "busybox"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { + c.Fatal(err, out) } - if out, err := d.Cmd("rename", "test", "test2"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { + c.Fatal(err, out) } - if err := d.Restart(); err != nil { - t.Fatal(err) + if err := s.d.Restart(); err != nil { + c.Fatal(err) } - if out, err := d.Cmd("start", "test2"); err != nil { - t.Fatal(err, out) + if out, err := s.d.Cmd("start", "test2"); err != nil { + c.Fatal(err, out) } - - logDone("daemon - rename persists through daemon restart") } -func TestDaemonLoggingDriverDefault(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") + out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) - if out, err := d.Cmd("wait", id); err != nil { - t.Fatal(out, err) + if out, err := s.d.Cmd("wait", id); err != nil { + c.Fatal(out, err) } - logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { - t.Fatal(err) + c.Fatal(err) } f, err := os.Open(logPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } var res struct { Log string `json:"log"` @@ -625,95 
+550,83 @@ func TestDaemonLoggingDriverDefault(t *testing.T) { Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { - t.Fatal(err) + c.Fatal(err) } if res.Log != "testline\n" { - t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { - t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { - t.Fatalf("Log time %v in future", res.Time) + c.Fatalf("Log time %v in future", res.Time) } - logDone("daemon - default 'json-file' logging driver") } -func TestDaemonLoggingDriverDefaultOverride(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline") + out, err := s.d.Cmd("run", "-d", "--log-driver=none", "busybox", "echo", "testline") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) - if out, err := d.Cmd("wait", id); err != nil { - t.Fatal(out, err) + if out, err := s.d.Cmd("wait", id); err != nil { + c.Fatal(out, err) } - logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { - t.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) } - logDone("daemon - default logging driver override in run") } -func TestDaemonLoggingDriverNone(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox("--log-driver=none"); err != nil { - 
t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") + out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) - if out, err := d.Cmd("wait", id); err != nil { - t.Fatal(out, err) + if out, err := s.d.Cmd("wait", id); err != nil { + c.Fatal(out, err) } - logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { - t.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) + c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) } - logDone("daemon - 'none' logging driver") } -func TestDaemonLoggingDriverNoneOverride(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox("--log-driver=none"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline") + out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "echo", "testline") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) - if out, err := d.Cmd("wait", id); err != nil { - t.Fatal(out, err) + if out, err := s.d.Cmd("wait", id); err != nil { + c.Fatal(out, err) } - logPath := filepath.Join(d.folder, "graph", "containers", id, id+"-json.log") + logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") if _, err := os.Stat(logPath); err != nil { - t.Fatal(err) + c.Fatal(err) } f, err := os.Open(logPath) if err != nil { - 
t.Fatal(err) + c.Fatal(err) } var res struct { Log string `json:"log"` @@ -721,121 +634,107 @@ func TestDaemonLoggingDriverNoneOverride(t *testing.T) { Time time.Time `json:"time"` } if err := json.NewDecoder(f).Decode(&res); err != nil { - t.Fatal(err) + c.Fatal(err) } if res.Log != "testline\n" { - t.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") + c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") } if res.Stream != "stdout" { - t.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") + c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") } if !time.Now().After(res.Time) { - t.Fatalf("Log time %v in future", res.Time) + c.Fatalf("Log time %v in future", res.Time) } - logDone("daemon - 'none' logging driver override in run") } -func TestDaemonLoggingDriverNoneLogsError(t *testing.T) { - d := NewDaemon(t) - - if err := d.StartWithBusybox("--log-driver=none"); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { + if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { + c.Fatal(err) } - defer d.Stop() - out, err := d.Cmd("run", "-d", "busybox", "echo", "testline") + out, err := s.d.Cmd("run", "-d", "busybox", "echo", "testline") if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) - out, err = d.Cmd("logs", id) + out, err = s.d.Cmd("logs", id) if err == nil { - t.Fatalf("Logs should fail with \"none\" driver") + c.Fatalf("Logs should fail with \"none\" driver") } if !strings.Contains(out, `\"logs\" command is supported only for \"json-file\" logging driver`) { - t.Fatalf("There should be error about non-json-file driver, got %s", out) + c.Fatalf("There should be error about non-json-file driver, got %s", out) } - logDone("daemon - logs not available for non-json-file drivers") } -func TestDaemonDots(t *testing.T) { - defer deleteAllContainers() - d := NewDaemon(t) - if err := 
d.StartWithBusybox(); err != nil { - t.Fatal(err) +func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatal(err) } - defer d.Stop() // Now create 4 containers - if _, err := d.Cmd("create", "busybox"); err != nil { - t.Fatalf("Error creating container: %q", err) + if _, err := s.d.Cmd("create", "busybox"); err != nil { + c.Fatalf("Error creating container: %q", err) } - if _, err := d.Cmd("create", "busybox"); err != nil { - t.Fatalf("Error creating container: %q", err) + if _, err := s.d.Cmd("create", "busybox"); err != nil { + c.Fatalf("Error creating container: %q", err) } - if _, err := d.Cmd("create", "busybox"); err != nil { - t.Fatalf("Error creating container: %q", err) + if _, err := s.d.Cmd("create", "busybox"); err != nil { + c.Fatalf("Error creating container: %q", err) } - if _, err := d.Cmd("create", "busybox"); err != nil { - t.Fatalf("Error creating container: %q", err) + if _, err := s.d.Cmd("create", "busybox"); err != nil { + c.Fatalf("Error creating container: %q", err) } - d.Stop() + s.d.Stop() - d.Start("--log-level=debug") - d.Stop() - content, _ := ioutil.ReadFile(d.logFile.Name()) + s.d.Start("--log-level=debug") + s.d.Stop() + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), "....") { - t.Fatalf("Debug level should not have ....\n%s", string(content)) + c.Fatalf("Debug level should not have ....\n%s", string(content)) } - d.Start("--log-level=error") - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + s.d.Start("--log-level=error") + s.d.Stop() + content, _ = ioutil.ReadFile(s.d.logFile.Name()) if strings.Contains(string(content), "....") { - t.Fatalf("Error level should not have ....\n%s", string(content)) + c.Fatalf("Error level should not have ....\n%s", string(content)) } - d.Start("--log-level=info") - d.Stop() - content, _ = ioutil.ReadFile(d.logFile.Name()) + s.d.Start("--log-level=info") + s.d.Stop() + content, _ = 
ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), "....") { - t.Fatalf("Info level should have ....\n%s", string(content)) + c.Fatalf("Info level should have ....\n%s", string(content)) } - - logDone("daemon - test dots on INFO") } -func TestDaemonUnixSockCleanedUp(t *testing.T) { - d := NewDaemon(t) +func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { dir, err := ioutil.TempDir("", "socket-cleanup-test") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(dir) sockPath := filepath.Join(dir, "docker.sock") - if err := d.Start("--host", "unix://"+sockPath); err != nil { - t.Fatal(err) + if err := s.d.Start("--host", "unix://"+sockPath); err != nil { + c.Fatal(err) } - defer d.Stop() if _, err := os.Stat(sockPath); err != nil { - t.Fatal("socket does not exist") + c.Fatal("socket does not exist") } - if err := d.Stop(); err != nil { - t.Fatal(err) + if err := s.d.Stop(); err != nil { + c.Fatal(err) } if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { - t.Fatal("unix socket is not cleaned up") + c.Fatal("unix socket is not cleaned up") } - - logDone("daemon - unix socket is cleaned up") } -func TestDaemonwithwrongkey(t *testing.T) { +func (s *DockerDaemonSuite) TestDaemonwithwrongkey(c *check.C) { type Config struct { Crv string `json:"crv"` D string `json:"d"` @@ -846,24 +745,23 @@ func TestDaemonwithwrongkey(t *testing.T) { } os.Remove("/etc/docker/key.json") - d := NewDaemon(t) - if err := d.Start(); err != nil { - t.Fatalf("Failed to start daemon: %v", err) + if err := s.d.Start(); err != nil { + c.Fatalf("Failed to start daemon: %v", err) } - if err := d.Stop(); err != nil { - t.Fatalf("Could not stop daemon: %v", err) + if err := s.d.Stop(); err != nil { + c.Fatalf("Could not stop daemon: %v", err) } config := &Config{} bytes, err := ioutil.ReadFile("/etc/docker/key.json") if err != nil { - t.Fatalf("Error reading key.json file: %s", err) + c.Fatalf("Error reading key.json file: %s", err) 
} // byte[] to Data-Struct if err := json.Unmarshal(bytes, &config); err != nil { - t.Fatalf("Error Unmarshal: %s", err) + c.Fatalf("Error Unmarshal: %s", err) } //replace config.Kid with the fake value @@ -872,27 +770,122 @@ func TestDaemonwithwrongkey(t *testing.T) { // NEW Data-Struct to byte[] newBytes, err := json.Marshal(&config) if err != nil { - t.Fatalf("Error Marshal: %s", err) + c.Fatalf("Error Marshal: %s", err) } // write back if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { - t.Fatalf("Error ioutil.WriteFile: %s", err) + c.Fatalf("Error ioutil.WriteFile: %s", err) } - d1 := NewDaemon(t) + defer os.Remove("/etc/docker/key.json") - if err := d1.Start(); err == nil { - d1.Stop() - t.Fatalf("It should not be succssful to start daemon with wrong key: %v", err) + if err := s.d.Start(); err == nil { + c.Fatalf("It should not be successful to start daemon with wrong key: %v", err) } - content, _ := ioutil.ReadFile(d1.logFile.Name()) + content, _ := ioutil.ReadFile(s.d.logFile.Name()) if !strings.Contains(string(content), "Public Key ID does not match") { - t.Fatal("Missing KeyID message from daemon logs") + c.Fatal("Missing KeyID message from daemon logs") } +} - os.Remove("/etc/docker/key.json") - logDone("daemon - it should be failed to start daemon with wrong key") +func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") + if err != nil { + c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) + } + containerID := strings.TrimSpace(out) + + if out, err := s.d.Cmd("kill", containerID); err != nil { + c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) + } + + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) + } + + errchan := make(chan error) + go func() { + if out, err := s.d.Cmd("wait", 
containerID); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + + select { + case <-time.After(5 * time.Second): + c.Fatal("Waiting on a stopped (killed) container timed out") + case err := <-errchan: + if err != nil { + c.Fatal(err) + } + } +} + +// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint +func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) { + const ( + testDaemonHttpsAddr = "localhost:4271" + ) + + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHttpsAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + //force tcp protocol + host := fmt.Sprintf("tcp://%s", testDaemonHttpsAddr) + daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err != nil { + c.Fatalf("Error Occurred: %s and output: %s", err, out) + } +} + +// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint +// by using a rogue client certificate and checks that it fails with the expected error. 
+func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) { + const ( + errBadCertificate = "remote error: bad certificate" + testDaemonHttpsAddr = "localhost:4271" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", + "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHttpsAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + //force tcp protocol + host := fmt.Sprintf("tcp://%s", testDaemonHttpsAddr) + daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err == nil || !strings.Contains(out, errBadCertificate) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) + } +} + +// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint +// which provides a rogue server certificate and checks that it fails with the expected error +func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) { + const ( + errCaUnknown = "x509: certificate signed by unknown authority" + testDaemonRogueHttpsAddr = "localhost:4272" + ) + if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", + "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHttpsAddr); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) + } + + //force tcp protocol + host := fmt.Sprintf("tcp://%s", testDaemonRogueHttpsAddr) + daemonArgs := []string{"--host", host, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} + out, err := s.d.CmdWithArgs(daemonArgs, "info") + if err == nil || !strings.Contains(out, 
errCaUnknown) { + c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) + } } diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go index f7f8cd7a9d356..332b128ed8f4c 100644 --- a/integration-cli/docker_cli_diff_test.go +++ b/integration-cli/docker_cli_diff_test.go @@ -3,16 +3,17 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) // ensure that an added file shows up in docker diff -func TestDiffFilenameShownInOutput(t *testing.T) { +func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) { containerCmd := `echo foo > /root/bar` runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to start the container: %s, %v", out, err) + c.Fatalf("failed to start the container: %s, %v", out, err) } cleanCID := strings.TrimSpace(out) @@ -20,7 +21,7 @@ func TestDiffFilenameShownInOutput(t *testing.T) { diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { - t.Fatalf("failed to run diff: %s %v", out, err) + c.Fatalf("failed to run diff: %s %v", out, err) } found := false @@ -31,15 +32,12 @@ func TestDiffFilenameShownInOutput(t *testing.T) { } } if !found { - t.Errorf("couldn't find the new file in docker diff's output: %v", out) + c.Errorf("couldn't find the new file in docker diff's output: %v", out) } - deleteContainer(cleanCID) - - logDone("diff - check if created file shows up") } // test to ensure GH #3840 doesn't occur any more -func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { +func (s *DockerSuite) TestDiffEnsureDockerinitFilesAreIgnored(c *check.C) { // this is a list of files which shouldn't show up in `docker diff` dockerinitFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerinit", "/.dockerenv"} @@ -49,7 +47,7 @@ func 
TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", containerCmd) out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanCID := strings.TrimSpace(out) @@ -57,26 +55,22 @@ func TestDiffEnsureDockerinitFilesAreIgnored(t *testing.T) { diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { - t.Fatalf("failed to run diff: %s, %v", out, err) + c.Fatalf("failed to run diff: %s, %v", out, err) } - deleteContainer(cleanCID) - for _, filename := range dockerinitFiles { if strings.Contains(out, filename) { - t.Errorf("found file which should've been ignored %v in diff output", filename) + c.Errorf("found file which should've been ignored %v in diff output", filename) } } } - - logDone("diff - check if ignored files show up in diff") } -func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { +func (s *DockerSuite) TestDiffEnsureOnlyKmsgAndPtmx(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sleep", "0") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanCID := strings.TrimSpace(out) @@ -84,9 +78,8 @@ func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { diffCmd := exec.Command(dockerBinary, "diff", cleanCID) out, _, err = runCommandWithOutput(diffCmd) if err != nil { - t.Fatalf("failed to run diff: %s, %v", out, err) + c.Fatalf("failed to run diff: %s, %v", out, err) } - deleteContainer(cleanCID) expected := map[string]bool{ "C /dev": true, @@ -109,9 +102,7 @@ func TestDiffEnsureOnlyKmsgAndPtmx(t *testing.T) { for _, line := range strings.Split(out, "\n") { if line != "" && !expected[line] { - t.Errorf("%q is shown in the diff but shouldn't", line) + c.Errorf("%q is shown in the diff but shouldn't", line) } } - - logDone("diff - ensure that only kmsg and ptmx in diff") } diff --git 
a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go index 767af55018392..80cc0c69d54b8 100644 --- a/integration-cli/docker_cli_events_test.go +++ b/integration-cli/docker_cli_events_test.go @@ -7,20 +7,22 @@ import ( "regexp" "strconv" "strings" - "testing" + "sync" "time" + + "github.com/go-check/check" ) -func TestEventsUntag(t *testing.T) { +func (s *DockerSuite) TestEventsUntag(c *check.C) { image := "busybox" - dockerCmd(t, "tag", image, "utest:tag1") - dockerCmd(t, "tag", image, "utest:tag2") - dockerCmd(t, "rmi", "utest:tag1") - dockerCmd(t, "rmi", "utest:tag2") + dockerCmd(c, "tag", image, "utest:tag1") + dockerCmd(c, "tag", image, "utest:tag2") + dockerCmd(c, "rmi", "utest:tag1") + dockerCmd(c, "rmi", "utest:tag2") eventsCmd := exec.Command(dockerBinary, "events", "--since=1") out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*200)) if exitCode != 0 || err != nil { - t.Fatalf("Failed to get events - exit code %d: %s", exitCode, err) + c.Fatalf("Failed to get events - exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") nEvents := len(events) @@ -29,194 +31,224 @@ func TestEventsUntag(t *testing.T) { // looking for. 
for _, v := range events[nEvents-3 : nEvents-1] { if !strings.Contains(v, "untag") { - t.Fatalf("event should be untag, not %#v", v) + c.Fatalf("event should be untag, not %#v", v) } } - logDone("events - untags are logged") } -func TestEventsContainerFailStartDie(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestEventsContainerFailStartDie(c *check.C) { - out, _, _ := dockerCmd(t, "images", "-q") + out, _ := dockerCmd(c, "images", "-q") image := strings.Split(out, "\n")[0] eventsCmd := exec.Command(dockerBinary, "run", "--name", "testeventdie", image, "blerg") _, _, err := runCommandWithOutput(eventsCmd) if err == nil { - t.Fatalf("Container run with command blerg should have failed, but it did not") + c.Fatalf("Container run with command blerg should have failed, but it did not") } - eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + eventsCmd = exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= 1 { - t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } startEvent := strings.Fields(events[len(events)-3]) dieEvent := strings.Fields(events[len(events)-2]) if startEvent[len(startEvent)-1] != "start" { - t.Fatalf("event should be start, not %#v", startEvent) + c.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { - t.Fatalf("event should be die, not %#v", dieEvent) + c.Fatalf("event should be die, not %#v", dieEvent) } - logDone("events - container unwilling to start logs die") } -func TestEventsLimit(t *testing.T) { - defer deleteAllContainers() - for i := 0; i < 30; i++ { - dockerCmd(t, "run", "busybox", "echo", strconv.Itoa(i)) +func (s *DockerSuite) TestEventsLimit(c *check.C) { + + var waitGroup sync.WaitGroup + errChan := make(chan error, 17) + + args := []string{"run", 
"--rm", "busybox", "true"} + for i := 0; i < 17; i++ { + waitGroup.Add(1) + go func() { + defer waitGroup.Done() + errChan <- exec.Command(dockerBinary, args...).Run() + }() + } + + waitGroup.Wait() + close(errChan) + + for err := range errChan { + if err != nil { + c.Fatalf("%q failed with error: %v", strings.Join(args, " "), err) + } } - eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, _, _ := runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") nEvents := len(events) - 1 if nEvents != 64 { - t.Fatalf("events should be limited to 64, but received %d", nEvents) + c.Fatalf("events should be limited to 64, but received %d", nEvents) } - logDone("events - limited to 64 entries") } -func TestEventsContainerEvents(t *testing.T) { - dockerCmd(t, "run", "--rm", "busybox", "true") - eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) +func (s *DockerSuite) TestEventsContainerEvents(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "true") + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { - t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) < 4 { - t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } createEvent := strings.Fields(events[len(events)-4]) startEvent := strings.Fields(events[len(events)-3]) dieEvent := strings.Fields(events[len(events)-2]) destroyEvent := strings.Fields(events[len(events)-1]) if createEvent[len(createEvent)-1] != "create" { - t.Fatalf("event 
should be create, not %#v", createEvent) + c.Fatalf("event should be create, not %#v", createEvent) } if startEvent[len(startEvent)-1] != "start" { - t.Fatalf("event should be start, not %#v", startEvent) + c.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { - t.Fatalf("event should be die, not %#v", dieEvent) + c.Fatalf("event should be die, not %#v", dieEvent) } if destroyEvent[len(destroyEvent)-1] != "destroy" { - t.Fatalf("event should be destroy, not %#v", destroyEvent) + c.Fatalf("event should be destroy, not %#v", destroyEvent) } - logDone("events - container create, start, die, destroy is logged") } -func TestEventsContainerEventsSinceUnixEpoch(t *testing.T) { - dockerCmd(t, "run", "--rm", "busybox", "true") +func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) { + dockerCmd(c, "run", "--rm", "busybox", "true") timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano) timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1) eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since='%s'", timeBeginning), - fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { - t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) < 4 { - t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } createEvent := strings.Fields(events[len(events)-4]) startEvent := strings.Fields(events[len(events)-3]) dieEvent := strings.Fields(events[len(events)-2]) destroyEvent := strings.Fields(events[len(events)-1]) if createEvent[len(createEvent)-1] != "create" { - t.Fatalf("event should be create, not %#v", createEvent) + c.Fatalf("event should be create, not %#v", createEvent) } if 
startEvent[len(startEvent)-1] != "start" { - t.Fatalf("event should be start, not %#v", startEvent) + c.Fatalf("event should be start, not %#v", startEvent) } if dieEvent[len(dieEvent)-1] != "die" { - t.Fatalf("event should be die, not %#v", dieEvent) + c.Fatalf("event should be die, not %#v", dieEvent) } if destroyEvent[len(destroyEvent)-1] != "destroy" { - t.Fatalf("event should be destroy, not %#v", destroyEvent) + c.Fatalf("event should be destroy, not %#v", destroyEvent) } - logDone("events - container create, start, die, destroy since Unix Epoch time") } -func TestEventsImageUntagDelete(t *testing.T) { +func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) { name := "testimageevents" - defer deleteImages(name) _, err := buildImage(name, `FROM scratch MAINTAINER "docker"`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if err := deleteImages(name); err != nil { - t.Fatal(err) + c.Fatal(err) } - eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, exitCode, err := runCommandWithOutput(eventsCmd) if exitCode != 0 || err != nil { - t.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) + c.Fatalf("Failed to get events with exit code %d: %s", exitCode, err) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) < 2 { - t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } untagEvent := strings.Fields(events[len(events)-2]) deleteEvent := strings.Fields(events[len(events)-1]) if untagEvent[len(untagEvent)-1] != "untag" { - t.Fatalf("untag should be untag, not %#v", untagEvent) + c.Fatalf("untag should be untag, not %#v", untagEvent) } if deleteEvent[len(deleteEvent)-1] != "delete" { - t.Fatalf("delete should be delete, not %#v", deleteEvent) + c.Fatalf("delete should be delete, not %#v", deleteEvent) } - 
logDone("events - image untag, delete is logged") } -func TestEventsImagePull(t *testing.T) { - since := daemonTime(t).Unix() - testRequires(t, Network) - - defer deleteImages("hello-world") +func (s *DockerSuite) TestEventsImagePull(c *check.C) { + since := daemonTime(c).Unix() + testRequires(c, Network) pullCmd := exec.Command(dockerBinary, "pull", "hello-world") if out, _, err := runCommandWithOutput(pullCmd); err != nil { - t.Fatalf("pulling the hello-world image from has failed: %s, %v", out, err) + c.Fatalf("pulling the hello-world image from has failed: %s, %v", out, err) } eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), - fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, _, _ := runCommandWithOutput(eventsCmd) events := strings.Split(strings.TrimSpace(out), "\n") event := strings.TrimSpace(events[len(events)-1]) if !strings.HasSuffix(event, "hello-world:latest: pull") { - t.Fatalf("Missing pull event - got:%q", event) + c.Fatalf("Missing pull event - got:%q", event) } - logDone("events - image pull is logged") } -func TestEventsImageImport(t *testing.T) { - defer deleteAllContainers() - since := daemonTime(t).Unix() +func (s *DockerSuite) TestEventsImageImport(c *check.C) { + since := daemonTime(c).Unix() + + id := make(chan string) + eventImport := make(chan struct{}) + eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(since, 10)) + stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := eventsCmd.Start(); err != nil { + c.Fatal(err) + } + defer eventsCmd.Process.Kill() + + go func() { + containerID := <-id + + matchImport := regexp.MustCompile(containerID + `: import$`) + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + if matchImport.MatchString(scanner.Text()) { + close(eventImport) + } + } + }() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := 
runCommandWithOutput(runCmd) if err != nil { - t.Fatal("failed to create a container", out, err) + c.Fatal("failed to create a container", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -225,25 +257,20 @@ func TestEventsImageImport(t *testing.T) { exec.Command(dockerBinary, "import", "-"), ) if err != nil { - t.Errorf("import failed with errors: %v, output: %q", err, out) + c.Errorf("import failed with errors: %v, output: %q", err, out) } + newContainerID := strings.TrimSpace(out) + id <- newContainerID - eventsCmd := exec.Command(dockerBinary, "events", - fmt.Sprintf("--since=%d", since), - fmt.Sprintf("--until=%d", daemonTime(t).Unix())) - out, _, _ = runCommandWithOutput(eventsCmd) - - events := strings.Split(strings.TrimSpace(out), "\n") - event := strings.TrimSpace(events[len(events)-1]) - - if !strings.HasSuffix(event, ": import") { - t.Fatalf("Missing import event - got:%q", event) + select { + case <-time.After(5 * time.Second): + c.Fatal("failed to observe image import in timely fashion") + case <-eventImport: + // ignore, done } - - logDone("events - image import is logged") } -func TestEventsFilters(t *testing.T) { +func (s *DockerSuite) TestEventsFilters(c *check.C) { parseEvents := func(out, match string) { events := strings.Split(out, "\n") events = events[:len(events)-1] @@ -251,67 +278,65 @@ func TestEventsFilters(t *testing.T) { eventFields := strings.Fields(event) eventName := eventFields[len(eventFields)-1] if ok, err := regexp.MatchString(match, eventName); err != nil || !ok { - t.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err) + c.Fatalf("event should match %s, got %#v, err: %v", match, eventFields, err) } } } - since := daemonTime(t).Unix() + since := daemonTime(c).Unix() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", "busybox", "true")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--rm", 
"busybox", "true")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die")) + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die")) if err != nil { - t.Fatalf("Failed to get events: %s", err) + c.Fatalf("Failed to get events: %s", err) } parseEvents(out, "die") - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", "event=die", "--filter", "event=start")) + out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", "event=die", "--filter", "event=start")) if err != nil { - t.Fatalf("Failed to get events: %s", err) + c.Fatalf("Failed to get events: %s", err) } parseEvents(out, "((die)|(start))") // make sure we at least got 2 start events count := strings.Count(out, "start") if count < 2 { - t.Fatalf("should have had 2 start events but had %d, out: %s", count, out) + c.Fatalf("should have had 2 start events but had %d, out: %s", count, out) } - logDone("events - filters") } -func TestEventsFilterImageName(t *testing.T) { - since := daemonTime(t).Unix() - defer deleteAllContainers() +func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { + since := daemonTime(c).Unix() out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_1", "-d", "busybox:latest", "true")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } container1 := strings.TrimSpace(out) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "container_2", "-d", "busybox", "true")) if err != nil { - t.Fatal(out, 
err) + c.Fatal(out, err) } container2 := strings.TrimSpace(out) - s := "busybox" - eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(t).Unix()), "--filter", fmt.Sprintf("image=%s", s)) + name := "busybox" + eventsCmd := exec.Command(dockerBinary, "events", fmt.Sprintf("--since=%d", since), fmt.Sprintf("--until=%d", daemonTime(c).Unix()), "--filter", fmt.Sprintf("image=%s", name)) out, _, err = runCommandWithOutput(eventsCmd) if err != nil { - t.Fatalf("Failed to get events, error: %s(%s)", err, out) + c.Fatalf("Failed to get events, error: %s(%s)", err, out) } events := strings.Split(out, "\n") events = events[:len(events)-1] if len(events) == 0 { - t.Fatalf("Expected events but found none for the image busybox:latest") + c.Fatalf("Expected events but found none for the image busybox:latest") } count1 := 0 count2 := 0 @@ -324,27 +349,28 @@ func TestEventsFilterImageName(t *testing.T) { } } if count1 == 0 || count2 == 0 { - t.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2) + c.Fatalf("Expected events from each container but got %d from %s and %d from %s", count1, container1, count2, container2) } - logDone("events - filters using image") } -func TestEventsFilterContainer(t *testing.T) { - defer deleteAllContainers() - since := fmt.Sprintf("%d", daemonTime(t).Unix()) +func (s *DockerSuite) TestEventsFilterContainer(c *check.C) { + since := fmt.Sprintf("%d", daemonTime(c).Unix()) nameID := make(map[string]string) for _, name := range []string{"container_1", "container_2"} { - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", name, "busybox", "true")) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", name, "busybox", "true")) if err != nil { - t.Fatal(err) + c.Fatalf("Error: %v, Output: %s", err, out) } - nameID[name] = strings.TrimSpace(out) - waitInspect(name, 
"{{.State.Runing }}", "false", 5) + id, err := inspectField(name, "Id") + if err != nil { + c.Fatal(err) + } + nameID[name] = id } - until := fmt.Sprintf("%d", daemonTime(t).Unix()) + until := fmt.Sprintf("%d", daemonTime(c).Unix()) checkEvents := func(id string, events []string) error { if len(events) != 3 { // create, start, die @@ -370,57 +396,49 @@ func TestEventsFilterContainer(t *testing.T) { eventsCmd := exec.Command(dockerBinary, "events", "--since", since, "--until", until, "--filter", "container="+name) out, _, err := runCommandWithOutput(eventsCmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") if err := checkEvents(ID, events); err != nil { - t.Fatal(err) + c.Fatal(err) } // filter by ID's eventsCmd = exec.Command(dockerBinary, "events", "--since", since, "--until", until, "--filter", "container="+ID) out, _, err = runCommandWithOutput(eventsCmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } events = strings.Split(strings.TrimSuffix(out, "\n"), "\n") if err := checkEvents(ID, events); err != nil { - t.Fatal(err) + c.Fatal(err) } } - logDone("events - filters using container name") } -func TestEventsStreaming(t *testing.T) { - start := daemonTime(t).Unix() +func (s *DockerSuite) TestEventsStreaming(c *check.C) { + start := daemonTime(c).Unix() - finish := make(chan struct{}) - defer close(finish) id := make(chan string) eventCreate := make(chan struct{}) eventStart := make(chan struct{}) eventDie := make(chan struct{}) eventDestroy := make(chan struct{}) - go func() { - eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(start, 10)) - stdout, err := eventsCmd.StdoutPipe() - if err != nil { - t.Fatal(err) - } - err = eventsCmd.Start() - if err != nil { - t.Fatalf("failed to start 'docker events': %s", err) - } - - go func() { - <-finish - eventsCmd.Process.Kill() - }() + eventsCmd := exec.Command(dockerBinary, "events", "--since", strconv.FormatInt(start, 10)) + 
stdout, err := eventsCmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + if err := eventsCmd.Start(); err != nil { + c.Fatalf("failed to start 'docker events': %s", err) + } + defer eventsCmd.Process.Kill() + go func() { containerID := <-id matchCreate := regexp.MustCompile(containerID + `: \(from busybox:latest\) create$`) @@ -441,38 +459,33 @@ func TestEventsStreaming(t *testing.T) { close(eventDestroy) } } - - err = eventsCmd.Wait() - if err != nil && !IsKilled(err) { - t.Fatalf("docker events had bad exit status: %s", err) - } }() runCmd := exec.Command(dockerBinary, "run", "-d", "busybox:latest", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) id <- cleanedContainerID select { case <-time.After(5 * time.Second): - t.Fatal("failed to observe container create in timely fashion") + c.Fatal("failed to observe container create in timely fashion") case <-eventCreate: // ignore, done } select { case <-time.After(5 * time.Second): - t.Fatal("failed to observe container start in timely fashion") + c.Fatal("failed to observe container start in timely fashion") case <-eventStart: // ignore, done } select { case <-time.After(5 * time.Second): - t.Fatal("failed to observe container die in timely fashion") + c.Fatal("failed to observe container die in timely fashion") case <-eventDie: // ignore, done } @@ -480,15 +493,13 @@ func TestEventsStreaming(t *testing.T) { rmCmd := exec.Command(dockerBinary, "rm", cleanedContainerID) out, _, err = runCommandWithOutput(rmCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } select { case <-time.After(5 * time.Second): - t.Fatal("failed to observe container destroy in timely fashion") + c.Fatal("failed to observe container destroy in timely fashion") case <-eventDestroy: // ignore, done } - - logDone("events - streamed to stdout") } diff --git a/integration-cli/docker_cli_events_unix_test.go 
b/integration-cli/docker_cli_events_unix_test.go index 4e54283501213..1a08f2b3c0182 100644 --- a/integration-cli/docker_cli_events_unix_test.go +++ b/integration-cli/docker_cli_events_unix_test.go @@ -8,48 +8,46 @@ import ( "io/ioutil" "os" "os/exec" - "testing" "unicode" + "github.com/go-check/check" "github.com/kr/pty" ) // #5979 -func TestEventsRedirectStdout(t *testing.T) { - since := daemonTime(t).Unix() - dockerCmd(t, "run", "busybox", "true") - defer deleteAllContainers() +func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) { + since := daemonTime(c).Unix() + dockerCmd(c, "run", "busybox", "true") file, err := ioutil.TempFile("", "") if err != nil { - t.Fatalf("could not create temp file: %v", err) + c.Fatalf("could not create temp file: %v", err) } defer os.Remove(file.Name()) - command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(t).Unix(), file.Name()) + command := fmt.Sprintf("%s events --since=%d --until=%d > %s", dockerBinary, since, daemonTime(c).Unix(), file.Name()) _, tty, err := pty.Open() if err != nil { - t.Fatalf("Could not open pty: %v", err) + c.Fatalf("Could not open pty: %v", err) } cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty if err := cmd.Run(); err != nil { - t.Fatalf("run err for command %q: %v", command, err) + c.Fatalf("run err for command %q: %v", command, err) } scanner := bufio.NewScanner(file) for scanner.Scan() { - for _, c := range scanner.Text() { - if unicode.IsControl(c) { - t.Fatalf("found control character %v", []byte(string(c))) + for _, ch := range scanner.Text() { + if unicode.IsControl(ch) { + c.Fatalf("found control character %v", []byte(string(ch))) } } } if err := scanner.Err(); err != nil { - t.Fatalf("Scan err for command %q: %v", command, err) + c.Fatalf("Scan err for command %q: %v", command, err) } - logDone("events - redirect stdout") } diff --git a/integration-cli/docker_cli_exec_test.go 
b/integration-cli/docker_cli_exec_test.go index 9fcee32a7a4cb..4b36d7b532142 100644 --- a/integration-cli/docker_cli_exec_test.go +++ b/integration-cli/docker_cli_exec_test.go @@ -12,224 +12,170 @@ import ( "sort" "strings" "sync" - "testing" "time" + + "github.com/go-check/check" ) -func TestExec(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExec(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/tmp/file") out, _, err := runCommandWithOutput(execCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.Trim(out, "\r\n") if expected := "test"; out != expected { - t.Errorf("container exec should've printed %q but printed %q", expected, out) + c.Errorf("container exec should've printed %q but printed %q", expected, out) } - logDone("exec - basic test") } -func TestExecInteractiveStdinClose(t *testing.T) { - defer deleteAllContainers() - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat")) - if err != nil { - t.Fatal(err) - } - - contId := strings.TrimSpace(out) - - returnchan := make(chan struct{}) - - go func() { - var err error - cmd := exec.Command(dockerBinary, "exec", "-i", contId, "/bin/ls", "/") - cmd.Stdin = os.Stdin - if err != nil { - t.Fatal(err) - } - - out, err := cmd.CombinedOutput() - if err != nil { - t.Fatal(err, string(out)) - } - - if string(out) == "" { - t.Fatalf("Output was empty, likely blocked by standard input") - } - - returnchan <- struct{}{} - }() - - select { - case <-returnchan: - case <-time.After(10 * time.Second): - t.Fatal("timed out running docker exec") - } - - logDone("exec - interactive mode closes stdin after execution") -} - -func TestExecInteractive(t *testing.T) { - 
defer deleteAllContainers() +func (s *DockerSuite) TestExecInteractive(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "-i", "testing", "sh") stdin, err := execCmd.StdinPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } stdout, err := execCmd.StdoutPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } if err := execCmd.Start(); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := stdin.Write([]byte("cat /tmp/file\n")); err != nil { - t.Fatal(err) + c.Fatal(err) } r := bufio.NewReader(stdout) line, err := r.ReadString('\n') if err != nil { - t.Fatal(err) + c.Fatal(err) } line = strings.TrimSpace(line) if line != "test" { - t.Fatalf("Output should be 'test', got '%q'", line) + c.Fatalf("Output should be 'test', got '%q'", line) } if err := stdin.Close(); err != nil { - t.Fatal(err) + c.Fatal(err) } - finish := make(chan struct{}) + errChan := make(chan error) go func() { - if err := execCmd.Wait(); err != nil { - t.Fatal(err) - } - close(finish) + errChan <- execCmd.Wait() + close(errChan) }() select { - case <-finish: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(1 * time.Second): - t.Fatal("docker exec failed to exit on stdin close") + c.Fatal("docker exec failed to exit on stdin close") } - logDone("exec - Interactive test") } -func TestExecAfterContainerRestart(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = 
runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "exec", cleanedContainerID, "echo", "hello") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } outStr := strings.TrimSpace(out) if outStr != "hello" { - t.Errorf("container should've printed hello, instead printed %q", outStr) + c.Errorf("container should've printed hello, instead printed %q", outStr) } - logDone("exec - exec running container after container restart") } -func TestExecAfterDaemonRestart(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { + testRequires(c, SameHostDaemon) - d := NewDaemon(t) - if err := d.StartWithBusybox(); err != nil { - t.Fatalf("Could not start daemon with busybox: %v", err) + if err := s.d.StartWithBusybox(); err != nil { + c.Fatalf("Could not start daemon with busybox: %v", err) } - defer d.Stop() - if out, err := d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { - t.Fatalf("Could not run top: err=%v\n%s", err, out) + if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { + c.Fatalf("Could not run top: err=%v\n%s", err, out) } - if err := d.Restart(); err != nil { - t.Fatalf("Could not restart daemon: %v", err) + if err := s.d.Restart(); err != nil { + c.Fatalf("Could not restart daemon: %v", err) } - if out, err := d.Cmd("start", "top"); err != nil { - t.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) + if out, err := s.d.Cmd("start", "top"); err != nil { + c.Fatalf("Could not start top after daemon restart: err=%v\n%s", err, out) } - out, err := d.Cmd("exec", "top", "echo", "hello") + out, err := s.d.Cmd("exec", "top", "echo", "hello") if err != nil { - t.Fatalf("Could not exec on container top: err=%v\n%s", err, out) + c.Fatalf("Could not exec on 
container top: err=%v\n%s", err, out) } outStr := strings.TrimSpace(string(out)) if outStr != "hello" { - t.Errorf("container should've printed hello, instead printed %q", outStr) + c.Errorf("container should've printed hello, instead printed %q", outStr) } - - logDone("exec - exec running container after daemon restart") } // Regression test for #9155, #9044 -func TestExecEnv(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecEnv(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing", "busybox", "top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "testing", "env") out, _, err := runCommandWithOutput(execCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if strings.Contains(out, "LALA=value1") || !strings.Contains(out, "LALA=value2") || !strings.Contains(out, "HOME=/root") { - t.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") + c.Errorf("exec env(%q), expect %q, %q", out, "LALA=value2", "HOME=/root") } - logDone("exec - exec inherits correct env") } -func TestExecExitStatus(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecExitStatus(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, _, err := runCommandWithStdoutStderr(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // Test normal (non-detached) case first @@ -237,20 +183,18 @@ func TestExecExitStatus(t *testing.T) { ec, _ := runCommand(cmd) if ec != 23 { - t.Fatalf("Should have had an ExitCode of 23, not: %d", ec) + c.Fatalf("Should have had an ExitCode of 23, not: %d", ec) } - logDone("exec - exec non-zero ExitStatus") } -func TestExecPausedContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecPausedContainer(c *check.C) { defer 
unpauseAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } ContainerID := strings.TrimSpace(out) @@ -258,172 +202,172 @@ func TestExecPausedContainer(t *testing.T) { pausedCmd := exec.Command(dockerBinary, "pause", "testing") out, _, _, err = runCommandWithStdoutStderr(pausedCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } execCmd := exec.Command(dockerBinary, "exec", "-i", "-t", ContainerID, "echo", "hello") out, _, err = runCommandWithOutput(execCmd) if err == nil { - t.Fatal("container should fail to exec new command if it is paused") + c.Fatal("container should fail to exec new command if it is paused") } expected := ContainerID + " is paused, unpause the container before exec" if !strings.Contains(out, expected) { - t.Fatal("container should not exec new command if it is paused") + c.Fatal("container should not exec new command if it is paused") } - logDone("exec - exec should not exec a pause container") } // regression test for #9476 -func TestExecTtyCloseStdin(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecTtyCloseStdin(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cmd = exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") stdinRw, err := cmd.StdinPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } stdinRw.Write([]byte("test")) stdinRw.Close() if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cmd = exec.Command(dockerBinary, "top", "exec_tty_stdin") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } outArr := strings.Split(out, "\n") if len(outArr) > 3 || strings.Contains(out, "nsenter-exec") 
{ // This is the really bad part if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "exec_tty_stdin")); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - t.Fatalf("exec process left running\n\t %s", out) + c.Fatalf("exec process left running\n\t %s", out) } - logDone("exec - stdin is closed properly with tty enabled") } -func TestExecTtyWithoutStdin(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecTtyWithoutStdin(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "-ti", "busybox") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to start container: %v (%v)", out, err) + c.Fatalf("failed to start container: %v (%v)", out, err) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { - t.Fatal(err) + c.Fatal(err) } defer func() { cmd := exec.Command(dockerBinary, "kill", id) if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatalf("failed to kill container: %v (%v)", out, err) + c.Fatalf("failed to kill container: %v (%v)", out, err) } }() - done := make(chan struct{}) + errChan := make(chan error) go func() { - defer close(done) + defer close(errChan) cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") if _, err := cmd.StdinPipe(); err != nil { - t.Fatal(err) + errChan <- err + return } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { - t.Fatal("exec should have failed") + errChan <- fmt.Errorf("exec should have failed") + return } else if !strings.Contains(out, expected) { - t.Fatalf("exec failed with error %q: expected %q", out, expected) + errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) + return } }() select { - case <-done: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(3 * time.Second): - t.Fatal("exec is running but should have failed") + c.Fatal("exec is running but should have failed") } - logDone("exec - forbid piped stdin to tty 
enabled container") } -func TestExecParseError(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecParseError(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // Test normal (non-detached) case first cmd := exec.Command(dockerBinary, "exec", "top") if _, stderr, code, err := runCommandWithStdoutStderr(cmd); err == nil || !strings.Contains(stderr, "See '"+dockerBinary+" exec --help'") || code == 0 { - t.Fatalf("Should have thrown error & point to help: %s", stderr) + c.Fatalf("Should have thrown error & point to help: %s", stderr) } - logDone("exec - error on parseExec should point to help") } -func TestExecStopNotHanging(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top").CombinedOutput(); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if err := exec.Command(dockerBinary, "exec", "testing", "top").Start(); err != nil { - t.Fatal(err) + c.Fatal(err) + } + + type dstop struct { + out []byte + err error } - wait := make(chan struct{}) + ch := make(chan dstop) go func() { - if out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput(); err != nil { - t.Fatal(out, err) - } - close(wait) + out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() + ch <- dstop{out, err} + close(ch) }() select { case <-time.After(3 * time.Second): - t.Fatal("Container stop timed out") - case <-wait: + c.Fatal("Container stop timed out") + case s := <-ch: + c.Assert(s.err, check.IsNil) } - logDone("exec - container with exec not hanging on stop") } -func TestExecCgroup(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestExecCgroup(c *check.C) { var cmd *exec.Cmd cmd = exec.Command(dockerBinary, "run", "-d", 
"--name", "testing", "busybox", "top") _, err := runCommand(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/1/cgroup") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerCgroups := sort.StringSlice(strings.Split(string(out), "\n")) var wg sync.WaitGroup - var s sync.Mutex + var mu sync.Mutex execCgroups := []sort.StringSlice{} + errChan := make(chan error) // exec a few times concurrently to get consistent failure for i := 0; i < 5; i++ { wg.Add(1) @@ -431,17 +375,23 @@ func TestExecCgroup(t *testing.T) { cmd := exec.Command(dockerBinary, "exec", "testing", "cat", "/proc/self/cgroup") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + errChan <- err + return } cg := sort.StringSlice(strings.Split(string(out), "\n")) - s.Lock() + mu.Lock() execCgroups = append(execCgroups, cg) - s.Unlock() + mu.Unlock() wg.Done() }() } wg.Wait() + close(errChan) + + for err := range errChan { + c.Assert(err, check.IsNil) + } for _, cg := range execCgroups { if !reflect.DeepEqual(cg, containerCgroups) { @@ -454,86 +404,81 @@ func TestExecCgroup(t *testing.T) { for _, name := range containerCgroups { fmt.Printf(" %s\n", name) } - t.Fatal("cgroups mismatched") + c.Fatal("cgroups mismatched") } } - logDone("exec - exec has the container cgroups") } -func TestInspectExecID(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestInspectExecID(c *check.C) { out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "busybox", "top")) if exitCode != 0 || err != nil { - t.Fatalf("failed to run container: %s, %v", out, err) + c.Fatalf("failed to run container: %s, %v", out, err) } id := strings.TrimSuffix(out, "\n") out, err = inspectField(id, "ExecIDs") if err != nil { - t.Fatalf("failed to inspect container: %s, %v", out, err) + c.Fatalf("failed to inspect container: %s, %v", out, err) } - if out 
!= "" { - t.Fatalf("ExecIDs should be empty, got: %s", out) + if out != "[]" { + c.Fatalf("ExecIDs should be empty, got: %s", out) } exitCode, err = runCommand(exec.Command(dockerBinary, "exec", "-d", id, "ls", "/")) if exitCode != 0 || err != nil { - t.Fatalf("failed to exec in container: %s, %v", out, err) + c.Fatalf("failed to exec in container: %s, %v", out, err) } out, err = inspectField(id, "ExecIDs") if err != nil { - t.Fatalf("failed to inspect container: %s, %v", out, err) + c.Fatalf("failed to inspect container: %s, %v", out, err) } out = strings.TrimSuffix(out, "\n") if out == "[]" || out == "" { - t.Fatalf("ExecIDs should not be empty, got: %s", out) + c.Fatalf("ExecIDs should not be empty, got: %s", out) } - logDone("inspect - inspect a container with ExecIDs") } -func TestLinksPingLinkedContainersOnRename(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { var out string - out, _, _ = dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "top") + out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") idA := strings.TrimSpace(out) if idA == "" { - t.Fatal(out, "id should not be nil") + c.Fatal(out, "id should not be nil") } - out, _, _ = dockerCmd(t, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") + out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") idB := strings.TrimSpace(out) if idB == "" { - t.Fatal(out, "id should not be nil") + c.Fatal(out, "id should not be nil") } execCmd := exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") out, _, err := runCommandWithOutput(execCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - dockerCmd(t, "rename", "container1", "container_new") + dockerCmd(c, "rename", "container1", "container_new") execCmd = exec.Command(dockerBinary, "exec", "container2", "ping", "-c", "1", 
"alias1", "-W", "1") out, _, err = runCommandWithOutput(execCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - logDone("links - ping linked container upon rename") } -func TestRunExecDir(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunExecDir(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(out) execDir := filepath.Join(execDriverPath, id) @@ -542,92 +487,90 @@ func TestRunExecDir(t *testing.T) { { fi, err := os.Stat(execDir) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !fi.IsDir() { - t.Fatalf("%q must be a directory", execDir) + c.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) if err != nil { - t.Fatal(err) + c.Fatal(err) } } stopCmd := exec.Command(dockerBinary, "stop", id) out, _, err = runCommandWithOutput(stopCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } { _, err := os.Stat(execDir) if err == nil { - t.Fatal(err) + c.Fatal(err) } if err == nil { - t.Fatalf("Exec directory %q exists for removed container!", execDir) + c.Fatalf("Exec directory %q exists for removed container!", execDir) } if !os.IsNotExist(err) { - t.Fatalf("Error should be about non-existing, got %s", err) + c.Fatalf("Error should be about non-existing, got %s", err) } } startCmd := exec.Command(dockerBinary, "start", id) out, _, err = runCommandWithOutput(startCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } { fi, err := os.Stat(execDir) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !fi.IsDir() { - t.Fatalf("%q must be a directory", execDir) + c.Fatalf("%q must be a directory", execDir) } fi, err = os.Stat(stateFile) if err != nil { - t.Fatal(err) + c.Fatal(err) } } rmCmd := exec.Command(dockerBinary, "rm", "-f", id) out, _, err = runCommandWithOutput(rmCmd) if err != nil { - t.Fatal(err, out) + 
c.Fatal(err, out) } { _, err := os.Stat(execDir) if err == nil { - t.Fatal(err) + c.Fatal(err) } if err == nil { - t.Fatalf("Exec directory %q is exists for removed container!", execDir) + c.Fatalf("Exec directory %q is exists for removed container!", execDir) } if !os.IsNotExist(err) { - t.Fatalf("Error should be about non-existing, got %s", err) + c.Fatalf("Error should be about non-existing, got %s", err) } } - logDone("run - check execdriver dir behavior") } -func TestRunMutableNetworkFiles(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { + testRequires(c, SameHostDaemon) for _, fn := range []string{"resolv.conf", "hosts"} { deleteAllContainers() content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) if err != nil { - t.Fatal(err) + c.Fatal(err) } if strings.TrimSpace(string(content)) != "success" { - t.Fatal("Content was not what was modified in the container", string(content)) + c.Fatal("Content was not what was modified in the container", string(content)) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "c2", "busybox", "top")) if err != nil { - t.Fatal(err) + c.Fatal(err) } contID := strings.TrimSpace(out) @@ -636,32 +579,83 @@ func TestRunMutableNetworkFiles(t *testing.T) { f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) if err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := f.Seek(0, 0); err != nil { f.Close() - t.Fatal(err) + c.Fatal(err) } if err := f.Truncate(0); err != nil { f.Close() - t.Fatal(err) + c.Fatal(err) } if _, err := f.Write([]byte("success2\n")); err != nil { f.Close() - t.Fatal(err) + c.Fatal(err) } f.Close() res, err := exec.Command(dockerBinary, "exec", contID, "cat", "/etc/"+fn).CombinedOutput() if err != nil { - t.Fatalf("Output: %s, error: %s", 
res, err) + c.Fatalf("Output: %s, error: %s", res, err) } if string(res) != "success2\n" { - t.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) + c.Fatalf("Expected content of %s: %q, got: %q", fn, "success2\n", res) } } - logDone("run - mutable network files") +} + +func (s *DockerSuite) TestExecWithUser(c *check.C) { + + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + cmd := exec.Command(dockerBinary, "exec", "-u", "1", "parent", "id") + out, _, err := runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { + c.Fatalf("exec with user by id expected daemon user got %s", out) + } + + cmd = exec.Command(dockerBinary, "exec", "-u", "root", "parent", "id") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + if !strings.Contains(out, "uid=0(root) gid=0(root)") { + c.Fatalf("exec with user by root expected root user got %s", out) + } + +} + +func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { + + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "top") + if out, _, err := runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sda b 8 0") + out, _, err := runCommandWithOutput(cmd) + if err == nil || !strings.Contains(out, "Operation not permitted") { + c.Fatalf("exec mknod in --cap-drop=ALL container without --privileged should failed") + } + + cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + c.Fatal(err, out) + } + + if actual := strings.TrimSpace(out); actual != "ok" { + c.Fatalf("exec mknod in --cap-drop=ALL container with --privileged failed: 
%v, output: %q", err, out) + } + } diff --git a/integration-cli/docker_cli_exec_unix_test.go b/integration-cli/docker_cli_exec_unix_test.go new file mode 100644 index 0000000000000..bee44b9902593 --- /dev/null +++ b/integration-cli/docker_cli_exec_unix_test.go @@ -0,0 +1,47 @@ +// +build !windows,!test_no_exec + +package main + +import ( + "bytes" + "io" + "os/exec" + "strings" + "time" + + "github.com/go-check/check" + "github.com/kr/pty" +) + +// regression test for #12546 +func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "busybox", "/bin/cat")) + if err != nil { + c.Fatal(err) + } + contId := strings.TrimSpace(out) + + cmd := exec.Command(dockerBinary, "exec", "-i", contId, "echo", "-n", "hello") + p, err := pty.Start(cmd) + if err != nil { + c.Fatal(err) + } + + b := bytes.NewBuffer(nil) + go io.Copy(b, p) + + ch := make(chan error) + go func() { ch <- cmd.Wait() }() + + select { + case err := <-ch: + if err != nil { + c.Errorf("cmd finished with error %v", err) + } + if output := b.String(); strings.TrimSpace(output) != "hello" { + c.Fatalf("Unexpected output %s", output) + } + case <-time.After(1 * time.Second): + c.Fatal("timed out running docker exec") + } +} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go index 2d03179ac68b0..3370a96761cab 100644 --- a/integration-cli/docker_cli_export_import_test.go +++ b/integration-cli/docker_cli_export_import_test.go @@ -4,94 +4,87 @@ import ( "os" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) // export an image and try to import it into a new one -func TestExportContainerAndImportImage(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") +func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { + containerID := "testexportcontainerandimportimage" + + runCmd := exec.Command(dockerBinary, "run", 
"-d", "--name", containerID, "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal("failed to create a container", out, err) + c.Fatal("failed to create a container", out, err) } - cleanedContainerID := strings.TrimSpace(out) - - inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectCmd := exec.Command(dockerBinary, "inspect", containerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err) + c.Fatalf("output should've been a container id: %s %s ", containerID, err) } - exportCmd := exec.Command(dockerBinary, "export", cleanedContainerID) + exportCmd := exec.Command(dockerBinary, "export", containerID) if out, _, err = runCommandWithOutput(exportCmd); err != nil { - t.Fatalf("failed to export container: %s, %v", out, err) + c.Fatalf("failed to export container: %s, %v", out, err) } importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(importCmd) if err != nil { - t.Fatalf("failed to import image: %s, %v", out, err) + c.Fatalf("failed to import image: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("output should've been an image id: %s, %v", out, err) + c.Fatalf("output should've been an image id: %s, %v", out, err) } - deleteContainer(cleanedContainerID) - deleteImages("repo/testexp:v1") - - logDone("export - export/import a container/image") } // Used to test output flag in the export command -func TestExportContainerWithOutputAndImportImage(t *testing.T) { - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") +func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { + containerID := 
"testexportcontainerwithoutputandimportimage" + + runCmd := exec.Command(dockerBinary, "run", "-d", "--name", containerID, "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal("failed to create a container", out, err) + c.Fatal("failed to create a container", out, err) } - cleanedContainerID := strings.TrimSpace(out) - - inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) + inspectCmd := exec.Command(dockerBinary, "inspect", containerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("output should've been a container id: %s %s ", cleanedContainerID, err) + c.Fatalf("output should've been a container id: %s %s ", containerID, err) } - exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", cleanedContainerID) + defer os.Remove("testexp.tar") + + exportCmd := exec.Command(dockerBinary, "export", "--output=testexp.tar", containerID) if out, _, err = runCommandWithOutput(exportCmd); err != nil { - t.Fatalf("failed to export container: %s, %v", out, err) + c.Fatalf("failed to export container: %s, %v", out, err) } out, _, err = runCommandWithOutput(exec.Command("cat", "testexp.tar")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") importCmd.Stdin = strings.NewReader(out) out, _, err = runCommandWithOutput(importCmd) if err != nil { - t.Fatalf("failed to import image: %s, %v", out, err) + c.Fatalf("failed to import image: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) inspectCmd = exec.Command(dockerBinary, "inspect", cleanedImageID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("output should've been an image id: %s, %v", out, err) + c.Fatalf("output should've been an image id: %s, %v", out, err) } - deleteContainer(cleanedContainerID) - deleteImages("repo/testexp:v1") - - os.Remove("/tmp/testexp.tar") - - logDone("export - export/import a 
container/image with output flag") } diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go index 8fc5cd1aab4eb..d6903e4fb9a3f 100644 --- a/integration-cli/docker_cli_help_test.go +++ b/integration-cli/docker_cli_help_test.go @@ -5,13 +5,13 @@ import ( "os/exec" "runtime" "strings" - "testing" "unicode" "github.com/docker/docker/pkg/homedir" + "github.com/go-check/check" ) -func TestHelpTextVerify(t *testing.T) { +func (s *DockerSuite) TestHelpTextVerify(c *check.C) { // Make sure main help text fits within 80 chars and that // on non-windows system we use ~ when possible (to shorten things). // Test for HOME set to its default value and set to "/" on linux @@ -51,26 +51,26 @@ func TestHelpTextVerify(t *testing.T) { helpCmd.Env = newEnvs out, ec, err := runCommandWithOutput(helpCmd) if err != nil || ec != 0 { - t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) + c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) } lines := strings.Split(out, "\n") for _, line := range lines { if len(line) > 80 { - t.Fatalf("Line is too long(%d chars):\n%s", len(line), line) + c.Fatalf("Line is too long(%d chars):\n%s", len(line), line) } // All lines should not end with a space if strings.HasSuffix(line, " ") { - t.Fatalf("Line should not end with a space: %s", line) + c.Fatalf("Line should not end with a space: %s", line) } if scanForHome && strings.Contains(line, `=`+home) { - t.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) + c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) } if runtime.GOOS != "windows" { i := strings.Index(line, homedir.GetShortcutString()) if i >= 0 && i != len(line)-1 && line[i+1] != '/' { - t.Fatalf("Main help should not have used home shortcut:\n%s", line) + c.Fatalf("Main help should not have used home shortcut:\n%s", line) } } } @@ -82,11 +82,11 @@ func TestHelpTextVerify(t *testing.T) { 
helpCmd.Env = newEnvs out, ec, err = runCommandWithOutput(helpCmd) if err != nil || ec != 0 { - t.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) + c.Fatalf("docker help should have worked\nout:%s\nec:%d", out, ec) } i := strings.Index(out, "Commands:") if i < 0 { - t.Fatalf("Missing 'Commands:' in:\n%s", out) + c.Fatalf("Missing 'Commands:' in:\n%s", out) } // Grab all chars starting at "Commands:" @@ -106,39 +106,39 @@ func TestHelpTextVerify(t *testing.T) { helpCmd.Env = newEnvs out, ec, err := runCommandWithOutput(helpCmd) if err != nil || ec != 0 { - t.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec) + c.Fatalf("Error on %q help: %s\nexit code:%d", cmd, out, ec) } lines := strings.Split(out, "\n") for _, line := range lines { if len(line) > 80 { - t.Fatalf("Help for %q is too long(%d chars):\n%s", cmd, + c.Fatalf("Help for %q is too long(%d chars):\n%s", cmd, len(line), line) } if scanForHome && strings.Contains(line, `"`+home) { - t.Fatalf("Help for %q should use ~ instead of %q on:\n%s", + c.Fatalf("Help for %q should use ~ instead of %q on:\n%s", cmd, home, line) } i := strings.Index(line, "~") if i >= 0 && i != len(line)-1 && line[i+1] != '/' { - t.Fatalf("Help for %q should not have used ~:\n%s", cmd, line) + c.Fatalf("Help for %q should not have used ~:\n%s", cmd, line) } // If a line starts with 4 spaces then assume someone // added a multi-line description for an option and we need // to flag it if strings.HasPrefix(line, " ") { - t.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line) + c.Fatalf("Help for %q should not have a multi-line option: %s", cmd, line) } // Options should NOT end with a period if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") { - t.Fatalf("Help for %q should not end with a period: %s", cmd, line) + c.Fatalf("Help for %q should not end with a period: %s", cmd, line) } // Options should NOT end with a space if strings.HasSuffix(line, " ") { - t.Fatalf("Help for %q 
should not end with a space: %s", cmd, line) + c.Fatalf("Help for %q should not end with a space: %s", cmd, line) } } @@ -146,10 +146,9 @@ func TestHelpTextVerify(t *testing.T) { expected := 39 if len(cmds) != expected { - t.Fatalf("Wrong # of cmds(%d), it should be: %d\nThe list:\n%q", + c.Fatalf("Wrong # of cmds(%d), it should be: %d\nThe list:\n%q", len(cmds), expected, cmds) } } - logDone("help - verify text") } diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go index ecb0a3a07ec06..d229f1a8c58f8 100644 --- a/integration-cli/docker_cli_history_test.go +++ b/integration-cli/docker_cli_history_test.go @@ -3,15 +3,17 @@ package main import ( "fmt" "os/exec" + "regexp" + "strconv" "strings" - "testing" + + "github.com/go-check/check" ) // This is a heisen-test. Because the created timestamp of images and the behavior of // sort is not predictable it doesn't always fail. -func TestBuildHistory(t *testing.T) { +func (s *DockerSuite) TestBuildHistory(c *check.C) { name := "testbuildhistory" - defer deleteImages(name) _, err := buildImage(name, `FROM busybox RUN echo "A" RUN echo "B" @@ -42,12 +44,12 @@ RUN echo "Z"`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, exitCode, err := runCommandWithOutput(exec.Command(dockerBinary, "history", "testbuildhistory")) if err != nil || exitCode != 0 { - t.Fatalf("failed to get image history: %s, %v", out, err) + c.Fatalf("failed to get image history: %s, %v", out, err) } actualValues := strings.Split(out, "\n")[1:27] @@ -58,27 +60,101 @@ RUN echo "Z"`, actualValue := actualValues[i] if !strings.Contains(actualValue, echoValue) { - t.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue) + c.Fatalf("Expected layer \"%s\", but was: %s", expectedValues[i], actualValue) } } - logDone("history - build history") } -func TestHistoryExistentImage(t *testing.T) { +func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { historyCmd := 
exec.Command(dockerBinary, "history", "busybox") _, exitCode, err := runCommandWithOutput(historyCmd) if err != nil || exitCode != 0 { - t.Fatal("failed to get image history") + c.Fatal("failed to get image history") } - logDone("history - history on existent image must pass") } -func TestHistoryNonExistentImage(t *testing.T) { +func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { historyCmd := exec.Command(dockerBinary, "history", "testHistoryNonExistentImage") _, exitCode, err := runCommandWithOutput(historyCmd) if err == nil || exitCode == 0 { - t.Fatal("history on a non-existent image didn't result in a non-zero exit status") + c.Fatal("history on a non-existent image didn't result in a non-zero exit status") + } +} + +func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { + name := "testhistoryimagewithcomment" + + // make a image through docker commit [ -m messages ] + //runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") + runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "true") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("failed to run container: %s, %v", out, err) + } + + waitCmd := exec.Command(dockerBinary, "wait", name) + if out, _, err := runCommandWithOutput(waitCmd); err != nil { + c.Fatalf("error thrown while waiting for container: %s, %v", out, err) + } + + comment := "This_is_a_comment" + + commitCmd := exec.Command(dockerBinary, "commit", "-m="+comment, name, name) + if out, _, err := runCommandWithOutput(commitCmd); err != nil { + c.Fatalf("failed to commit container to image: %s, %v", out, err) + } + + // test docker history to check comment messages + historyCmd := exec.Command(dockerBinary, "history", name) + out, exitCode, err := runCommandWithOutput(historyCmd) + if err != nil || exitCode != 0 { + c.Fatalf("failed to get image history: %s, %v", out, err) + } + + outputTabs := strings.Fields(strings.Split(out, "\n")[1]) + 
//outputTabs := regexp.MustCompile(" +").Split(outputLine, -1) + actualValue := outputTabs[len(outputTabs)-1] + + if !strings.Contains(actualValue, comment) { + c.Fatalf("Expected comments %q, but found %q", comment, actualValue) + } + +} + +func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "history", "--human=false", "busybox")) + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + if _, err := strconv.Atoi(strings.TrimSpace(sizeString)); err != nil { + c.Fatalf("The size '%s' was not an Integer", sizeString) + } + } +} + +func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "history", "--human=true", "busybox")) + lines := strings.Split(out, "\n") + sizeColumnRegex, _ := regexp.Compile("SIZE +") + humanSizeRegex, _ := regexp.Compile("^\\d+.*B$") // Matches human sizes like 10 MB, 3.2 KB, etc + indices := sizeColumnRegex.FindStringIndex(lines[0]) + startIndex := indices[0] + endIndex := indices[1] + for i := 1; i < len(lines)-1; i++ { + if endIndex > len(lines[i]) { + endIndex = len(lines[i]) + } + sizeString := lines[i][startIndex:endIndex] + if matchSuccess := humanSizeRegex.MatchString(strings.TrimSpace(sizeString)); !matchSuccess { + c.Fatalf("The size '%s' was not in human format", sizeString) + } } - logDone("history - history on non-existent image must pass") } diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go index 28b091efdf82c..0ab6462509340 100644 --- a/integration-cli/docker_cli_images_test.go +++ b/integration-cli/docker_cli_images_test.go @@ -6,137 +6,124 @@ import ( 
"reflect" "sort" "strings" - "testing" "time" "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" ) -func TestImagesEnsureImageIsListed(t *testing.T) { +func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err := runCommandWithOutput(imagesCmd) if err != nil { - t.Fatalf("listing images failed with errors: %s, %v", out, err) + c.Fatalf("listing images failed with errors: %s, %v", out, err) } if !strings.Contains(out, "busybox") { - t.Fatal("images should've listed busybox") + c.Fatal("images should've listed busybox") } - logDone("images - busybox should be listed") } -func TestImagesOrderedByCreationDate(t *testing.T) { - defer deleteImages("order:test_a") - defer deleteImages("order:test_c") - defer deleteImages("order:test_b") +func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) { id1, err := buildImage("order:test_a", `FROM scratch MAINTAINER dockerio1`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second) id2, err := buildImage("order:test_c", `FROM scratch MAINTAINER dockerio2`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second) id3, err := buildImage("order:test_b", `FROM scratch MAINTAINER dockerio3`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) if err != nil { - t.Fatalf("listing images failed with errors: %s, %v", out, err) + c.Fatalf("listing images failed with errors: %s, %v", out, err) } imgs := strings.Split(out, "\n") if imgs[0] != id3 { - t.Fatalf("First image must be %s, got %s", id3, imgs[0]) + c.Fatalf("First image must be %s, got %s", id3, imgs[0]) } if imgs[1] != id2 { - t.Fatalf("Second image must be %s, got %s", id2, imgs[1]) + c.Fatalf("Second image must be %s, got %s", id2, imgs[1]) } if imgs[2] != id1 { - t.Fatalf("Third image must be %s, got %s", id1, imgs[2]) + c.Fatalf("Third image 
must be %s, got %s", id1, imgs[2]) } - logDone("images - ordering by creation date") } -func TestImagesErrorWithInvalidFilterNameTest(t *testing.T) { +func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) { imagesCmd := exec.Command(dockerBinary, "images", "-f", "FOO=123") out, _, err := runCommandWithOutput(imagesCmd) if !strings.Contains(out, "Invalid filter") { - t.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err) + c.Fatalf("error should occur when listing images with invalid filter name FOO, %s, %v", out, err) } - logDone("images - invalid filter name check working") } -func TestImagesFilterLabel(t *testing.T) { +func (s *DockerSuite) TestImagesFilterLabel(c *check.C) { imageName1 := "images_filter_test1" imageName2 := "images_filter_test2" imageName3 := "images_filter_test3" - defer deleteAllContainers() - defer deleteImages(imageName1) - defer deleteImages(imageName2) - defer deleteImages(imageName3) image1ID, err := buildImage(imageName1, `FROM scratch LABEL match me`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } image2ID, err := buildImage(imageName2, `FROM scratch LABEL match="me too"`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } image3ID, err := buildImage(imageName3, `FROM scratch LABEL nomatch me`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.TrimSpace(out) if (!strings.Contains(out, image1ID) && !strings.Contains(out, image2ID)) || strings.Contains(out, image3ID) { - t.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out) + c.Fatalf("Expected ids %s,%s got %s", image1ID, image2ID, out) } cmd = exec.Command(dockerBinary, "images", "--no-trunc", "-q", "-f", "label=match=me too") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + 
c.Fatal(out, err) } out = strings.TrimSpace(out) if out != image2ID { - t.Fatalf("Expected %s got %s", image2ID, out) + c.Fatalf("Expected %s got %s", image2ID, out) } - logDone("images - filter label") } -func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { +func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { imageName := "images_filter_test" - defer deleteAllContainers() - defer deleteImages(imageName) buildImage(imageName, `FROM scratch RUN touch /test/foo @@ -156,7 +143,7 @@ func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { cmd := exec.Command(dockerBinary, "images", "-q", "-f", filter) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } listing := strings.Split(out, "\n") sort.Strings(listing) @@ -172,50 +159,45 @@ func TestImagesFilterWhiteSpaceTrimmingAndLowerCasingWorking(t *testing.T) { } fmt.Print("") } - t.Fatalf("All output must be the same") + c.Fatalf("All output must be the same") } } - logDone("images - white space trimming and lower casing") } -func TestImagesEnsureDanglingImageOnlyListedOnce(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { // create container 1 - c := exec.Command(dockerBinary, "run", "-d", "busybox", "true") - out, _, err := runCommandWithOutput(c) + cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") + out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error running busybox: %s, %v", out, err) + c.Fatalf("error running busybox: %s, %v", out, err) } containerId1 := strings.TrimSpace(out) // tag as foobox - c = exec.Command(dockerBinary, "commit", containerId1, "foobox") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "commit", containerId1, "foobox") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error tagging foobox: %s", err) + c.Fatalf("error tagging foobox: %s", err) } 
imageId := stringid.TruncateID(strings.TrimSpace(out)) - defer deleteImages(imageId) // overwrite the tag, making the previous image dangling - c = exec.Command(dockerBinary, "tag", "-f", "busybox", "foobox") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "tag", "-f", "busybox", "foobox") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("error tagging foobox: %s", err) + c.Fatalf("error tagging foobox: %s", err) } - defer deleteImages("foobox") - c = exec.Command(dockerBinary, "images", "-q", "-f", "dangling=true") - out, _, err = runCommandWithOutput(c) + cmd = exec.Command(dockerBinary, "images", "-q", "-f", "dangling=true") + out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("listing images failed with errors: %s, %v", out, err) + c.Fatalf("listing images failed with errors: %s, %v", out, err) } if e, a := 1, strings.Count(out, imageId); e != a { - t.Fatalf("expected 1 dangling image, got %d: %s", a, out) + c.Fatalf("expected 1 dangling image, got %d: %s", a, out) } - logDone("images - dangling image only listed once") } diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go index 087d08bd55b59..201dbaa580e7f 100644 --- a/integration-cli/docker_cli_import_test.go +++ b/integration-cli/docker_cli_import_test.go @@ -3,41 +3,39 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestImportDisplay(t *testing.T) { +func (s *DockerSuite) TestImportDisplay(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal("failed to create a container", out, err) + c.Fatal("failed to create a container", out, err) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "export", cleanedContainerID), exec.Command(dockerBinary, 
"import", "-"), ) if err != nil { - t.Errorf("import failed with errors: %v, output: %q", err, out) + c.Errorf("import failed with errors: %v, output: %q", err, out) } if n := strings.Count(out, "\n"); n != 1 { - t.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out) + c.Fatalf("display is messed up: %d '\\n' instead of 1:\n%s", n, out) } image := strings.TrimSpace(out) - defer deleteImages(image) runCmd = exec.Command(dockerBinary, "run", "--rm", image, "true") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal("failed to create a container", out, err) + c.Fatal("failed to create a container", out, err) } if out != "" { - t.Fatalf("command output should've been nothing, was %q", out) + c.Fatalf("command output should've been nothing, was %q", out) } - logDone("import - display is fine, imported image runs") } diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go index e6b79f01f769a..a7a931e8524a3 100644 --- a/integration-cli/docker_cli_info_test.go +++ b/integration-cli/docker_cli_info_test.go @@ -3,15 +3,16 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) // ensure docker info succeeds -func TestInfoEnsureSucceeds(t *testing.T) { +func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) { versionCmd := exec.Command(dockerBinary, "info") out, exitCode, err := runCommandWithOutput(versionCmd) if err != nil || exitCode != 0 { - t.Fatalf("failed to execute docker info: %s, %v", out, err) + c.Fatalf("failed to execute docker info: %s, %v", out, err) } // always shown fields @@ -29,9 +30,8 @@ func TestInfoEnsureSucceeds(t *testing.T) { for _, linePrefix := range stringsToCheck { if !strings.Contains(out, linePrefix) { - t.Errorf("couldn't find string %v in output", linePrefix) + c.Errorf("couldn't find string %v in output", linePrefix) } } - logDone("info - verify that it works") } diff --git a/integration-cli/docker_cli_inspect_test.go 
b/integration-cli/docker_cli_inspect_test.go index cf42217ac882a..58c61a9d0f8d5 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -1,23 +1,100 @@ package main import ( + "fmt" "os/exec" + "strconv" "strings" - "testing" + + "github.com/go-check/check" ) -func TestInspectImage(t *testing.T) { +func (s *DockerSuite) TestInspectImage(c *check.C) { imageTest := "emptyfs" imageTestID := "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Id}}'", imageTest) out, exitCode, err := runCommandWithOutput(imagesCmd) if exitCode != 0 || err != nil { - t.Fatalf("failed to inspect image: %s, %v", out, err) + c.Fatalf("failed to inspect image: %s, %v", out, err) } if id := strings.TrimSuffix(out, "\n"); id != imageTestID { - t.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) + c.Fatalf("Expected id: %s for image: %s but received id: %s", imageTestID, imageTest, id) + } + +} + +func (s *DockerSuite) TestInspectInt64(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-m=300M", "busybox", "true") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + out = strings.TrimSpace(out) + + inspectCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.Memory}}", out) + inspectOut, _, err := runCommandWithOutput(inspectCmd) + if err != nil { + c.Fatalf("failed to inspect container: %v, output: %q", err, inspectOut) + } + + if strings.TrimSpace(inspectOut) != "314572800" { + c.Fatalf("inspect got wrong value, got: %q, expected: 314572800", inspectOut) + } +} + +func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { + imageTest := "emptyfs" + imagesCmd := exec.Command(dockerBinary, "inspect", "--format='{{.Size}}'", imageTest) + out, exitCode, err := runCommandWithOutput(imagesCmd) + if exitCode != 
0 || err != nil { + c.Fatalf("failed to inspect image: %s, %v", out, err) + } + size, err := strconv.Atoi(strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatalf("failed to inspect size of the image: %s, %v", out, err) + } + + //now see if the size turns out to be the same + formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size) + imagesCmd = exec.Command(dockerBinary, "inspect", formatStr, imageTest) + out, exitCode, err = runCommandWithOutput(imagesCmd) + if exitCode != 0 || err != nil { + c.Fatalf("failed to inspect image: %s, %v", out, err) + } + if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result { + c.Fatalf("Expected size: %d for image: %s but received size: %s", size, imageTest, strings.TrimSuffix(out, "\n")) } +} - logDone("inspect - inspect an image") +func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { + runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + + id := strings.TrimSpace(out) + + runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.ExitCode}}'", id) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + exitCode, err := strconv.Atoi(strings.TrimSuffix(out, "\n")) + if err != nil { + c.Fatalf("failed to inspect exitcode of the container: %s, %v", out, err) + } + + //now get the exit code to verify + formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode) + runCmd = exec.Command(dockerBinary, "inspect", formatStr, id) + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + if result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")); err != nil || !result { + c.Fatalf("Expected exitcode: %d for container: %s", exitCode, 
id) + } } diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go index cd86c0c567667..d08709671da30 100644 --- a/integration-cli/docker_cli_kill_test.go +++ b/integration-cli/docker_cli_kill_test.go @@ -3,73 +3,72 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestKillContainer(t *testing.T) { +func (s *DockerSuite) TestKillContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { - t.Fatalf("failed to kill container: %s, %v", out, err) + c.Fatalf("failed to kill container: %s, %v", out, err) } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) if err != nil { - t.Fatalf("failed to list running containers: %s, %v", out, err) + c.Fatalf("failed to list running containers: %s, %v", out, err) } if strings.Contains(out, cleanedContainerID) { - t.Fatal("killed container is still running") + c.Fatal("killed container is still running") } deleteContainer(cleanedContainerID) - logDone("kill - kill container running top") } -func TestKillDifferentUserContainer(t *testing.T) { +func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-u", "daemon", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID 
:= strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("out should've been a container id: %s, %v", out, err) + c.Fatalf("out should've been a container id: %s, %v", out, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { - t.Fatalf("failed to kill container: %s, %v", out, err) + c.Fatalf("failed to kill container: %s, %v", out, err) } listRunningContainersCmd := exec.Command(dockerBinary, "ps", "-q") out, _, err = runCommandWithOutput(listRunningContainersCmd) if err != nil { - t.Fatalf("failed to list running containers: %s, %v", out, err) + c.Fatalf("failed to list running containers: %s, %v", out, err) } if strings.Contains(out, cleanedContainerID) { - t.Fatal("killed container is still running") + c.Fatal("killed container is still running") } deleteContainer(cleanedContainerID) - logDone("kill - kill container running top from a different user") } diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go index 04718c23f0bed..6bb173c10cc20 100644 --- a/integration-cli/docker_cli_links_test.go +++ b/integration-cli/docker_cli_links_test.go @@ -8,89 +8,81 @@ import ( "reflect" "regexp" "strings" - "testing" "time" "github.com/docker/docker/pkg/iptables" + "github.com/go-check/check" ) -func TestLinksEtcHostsRegularFile(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !strings.HasPrefix(out, "-") { - t.Errorf("/etc/hosts should be a regular file") + c.Errorf("/etc/hosts should be a regular file") } - logDone("link - /etc/hosts is a regular file") } -func 
TestLinksEtcHostsContentMatch(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { + testRequires(c, SameHostDaemon) runCmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hosts") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } hosts, err := ioutil.ReadFile("/etc/hosts") if os.IsNotExist(err) { - t.Skip("/etc/hosts does not exist, skip this test") + c.Skip("/etc/hosts does not exist, skip this test") } if out != string(hosts) { - t.Errorf("container") + c.Errorf("container") } - logDone("link - /etc/hosts matches hosts copy") } -func TestLinksPingUnlinkedContainers(t *testing.T) { +func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") exitCode, err := runCommand(runCmd) if exitCode == 0 { - t.Fatal("run ping did not fail") + c.Fatal("run ping did not fail") } else if exitCode != 1 { - t.Fatalf("run ping failed with errors: %v", err) + c.Fatalf("run ping failed with errors: %v", err) } - logDone("links - ping unlinked container") } // Test for appropriate error when calling --link with an invalid target container -func TestLinksInvalidContainerTarget(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--link", "bogus:alias", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err == nil { - t.Fatal("an invalid container target should produce an error") + c.Fatal("an invalid container target should produce an error") } if !strings.Contains(out, "Could not get container") { - t.Fatalf("error output expected 'Could not get container', but got %q instead; err: %v", out, err) + c.Fatalf("error output expected 'Could not get 
container', but got %q instead; err: %v", out, err) } - logDone("links - linking to non-existent container should not work") } -func TestLinksPingLinkedContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "container1", "--hostname", "fred", "busybox", "top") if _, err := runCommand(runCmd); err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "-d", "--name", "container2", "--hostname", "wilma", "busybox", "top") if _, err := runCommand(runCmd); err != nil { - t.Fatal(err) + c.Fatal(err) } runArgs := []string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "sh", "-c"} @@ -98,74 +90,68 @@ func TestLinksPingLinkedContainers(t *testing.T) { // test ping by alias, ping by name, and ping by hostname // 1. Ping by alias - dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) // 2. Ping by container name - dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) // 3. Ping by hostname - dockerCmd(t, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) + dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...) 
- logDone("links - ping linked container") } -func TestLinksPingLinkedContainersAfterRename(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { - out, _, _ := dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "top") + out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") idA := strings.TrimSpace(out) - out, _, _ = dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "top") + out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") idB := strings.TrimSpace(out) - dockerCmd(t, "rename", "container1", "container_new") - dockerCmd(t, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - dockerCmd(t, "kill", idA) - dockerCmd(t, "kill", idB) + dockerCmd(c, "rename", "container1", "container_new") + dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") + dockerCmd(c, "kill", idA) + dockerCmd(c, "kill", idB) - logDone("links - ping linked container after rename") } -func TestLinksIpTablesRulesWhenLinkAndUnlink(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { + testRequires(c, SameHostDaemon) - dockerCmd(t, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") - dockerCmd(t, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") - childIP := findContainerIP(t, "child") - parentIP := findContainerIP(t, "parent") + childIP := findContainerIP(c, "child") + parentIP := findContainerIP(c, "parent") sourceRule := 
[]string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} destinationRule := []string{"-i", "docker0", "-o", "docker0", "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { - t.Fatal("Iptables rules not found") + c.Fatal("Iptables rules not found") } - dockerCmd(t, "rm", "--link", "parent/http") + dockerCmd(c, "rm", "--link", "parent/http") if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) { - t.Fatal("Iptables rules should be removed when unlink") + c.Fatal("Iptables rules should be removed when unlink") } - dockerCmd(t, "kill", "child") - dockerCmd(t, "kill", "parent") + dockerCmd(c, "kill", "child") + dockerCmd(c, "kill", "parent") - logDone("link - verify iptables when link and unlink") } -func TestLinksInspectLinksStarted(t *testing.T) { +func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) - defer deleteAllContainers() - dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "top") - dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "top") - dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { - t.Fatal(err) + c.Fatal(err) } err = unmarshalJSON([]byte(links), &result) if err != nil { - 
t.Fatal(err) + c.Fatal(err) } output := convertSliceOfStringsToMap(result) @@ -173,28 +159,26 @@ func TestLinksInspectLinksStarted(t *testing.T) { equal := reflect.DeepEqual(output, expected) if !equal { - t.Fatalf("Links %s, expected %s", result, expected) + c.Fatalf("Links %s, expected %s", result, expected) } - logDone("link - links in started container inspect") } -func TestLinksInspectLinksStopped(t *testing.T) { +func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { var ( expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} result []string ) - defer deleteAllContainers() - dockerCmd(t, "run", "-d", "--name", "container1", "busybox", "top") - dockerCmd(t, "run", "-d", "--name", "container2", "busybox", "top") - dockerCmd(t, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") + dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") + dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") links, err := inspectFieldJSON("testinspectlink", "HostConfig.Links") if err != nil { - t.Fatal(err) + c.Fatal(err) } err = unmarshalJSON([]byte(links), &result) if err != nil { - t.Fatal(err) + c.Fatal(err) } output := convertSliceOfStringsToMap(result) @@ -202,47 +186,42 @@ func TestLinksInspectLinksStopped(t *testing.T) { equal := reflect.DeepEqual(output, expected) if !equal { - t.Fatalf("Links %s, but expected %s", result, expected) + c.Fatalf("Links %s, but expected %s", result, expected) } - logDone("link - links in stopped container inspect") } -func TestLinksNotStartedParentNotFail(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { runCmd := exec.Command(dockerBinary, "create", "--name=first", 
"busybox", "top") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "create", "--name=second", "--link=first:first", "busybox", "top") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "start", "first") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - logDone("link - container start successfully updating stopped parent links") } -func TestLinksHostsFilesInject(t *testing.T) { - testRequires(t, SameHostDaemon, ExecSupport) - - defer deleteAllContainers() +func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { + testRequires(c, SameHostDaemon, ExecSupport) out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "one", "busybox", "top")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } idOne := strings.TrimSpace(out) out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } idTwo := strings.TrimSpace(out) @@ -251,89 +230,104 @@ func TestLinksHostsFilesInject(t *testing.T) { contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") if err != nil { - t.Fatal(err, string(contentOne)) + c.Fatal(err, string(contentOne)) } contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") if err != nil { - t.Fatal(err, string(contentTwo)) + c.Fatal(err, string(contentTwo)) } if !strings.Contains(string(contentTwo), "onetwo") { - t.Fatal("Host is not present in updated hosts file", string(contentTwo)) + c.Fatal("Host is not present in updated hosts file", string(contentTwo)) } - logDone("link - ensure containers hosts files are updated with the link alias.") } -func TestLinksNetworkHostContainer(t *testing.T) { - defer 
deleteAllContainers() +func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true")) if err == nil || !strings.Contains(out, "--net=host can't be used with links. This would result in undefined behavior.") { - t.Fatalf("Running container linking to a container with --net host should have failed: %s", out) + c.Fatalf("Running container linking to a container with --net host should have failed: %s", out) } - logDone("link - error thrown when linking to container with --net host") } -func TestLinksUpdateOnRestart(t *testing.T) { - testRequires(t, SameHostDaemon, ExecSupport) - - defer deleteAllContainers() +func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { + testRequires(c, SameHostDaemon, ExecSupport) if out, err := exec.Command(dockerBinary, "run", "-d", "--name", "one", "busybox", "top").CombinedOutput(); err != nil { - t.Fatal(err, string(out)) + c.Fatal(err, string(out)) } out, err := exec.Command(dockerBinary, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top").CombinedOutput() if err != nil { - t.Fatal(err, string(out)) + c.Fatal(err, string(out)) } id := strings.TrimSpace(string(out)) realIP, err := inspectField("one", "NetworkSettings.IPAddress") if err != nil { - t.Fatal(err) + c.Fatal(err) } content, err := readContainerFileWithExec(id, "/etc/hosts") if err != nil { - t.Fatal(err, string(content)) + c.Fatal(err, string(content)) } getIP := func(hosts []byte, hostname string) string { re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) matches := re.FindSubmatch(hosts) if matches == nil { - t.Fatalf("Hostname %s have no matches in 
hosts", hostname) + c.Fatalf("Hostname %s have no matches in hosts", hostname) } return string(matches[1]) } if ip := getIP(content, "one"); ip != realIP { - t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) + c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) } if ip := getIP(content, "onetwo"); ip != realIP { - t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) + c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) } if out, err := exec.Command(dockerBinary, "restart", "one").CombinedOutput(); err != nil { - t.Fatal(err, string(out)) + c.Fatal(err, string(out)) } realIP, err = inspectField("one", "NetworkSettings.IPAddress") if err != nil { - t.Fatal(err) + c.Fatal(err) } content, err = readContainerFileWithExec(id, "/etc/hosts") if err != nil { - t.Fatal(err, string(content)) + c.Fatal(err, string(content)) } if ip := getIP(content, "one"); ip != realIP { - t.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) + c.Fatalf("For 'one' alias expected IP: %s, got: %s", realIP, ip) } if ip := getIP(content, "onetwo"); ip != realIP { - t.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) + c.Fatalf("For 'onetwo' alias expected IP: %s, got: %s", realIP, ip) + } +} + +func (s *DockerSuite) TestLinksEnvs(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("Run of first failed: %s\n%s", out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--name=second", "--link=first:first", "busybox", "env") + + out, stde, rc, err := runCommandWithStdoutStderr(runCmd) + if err != nil || rc != 0 { + c.Fatalf("run of 2nd failed: rc: %d, out: %s\n err: %s", rc, out, stde) + } + + if !strings.Contains(out, "FIRST_ENV_e1=\n") || + !strings.Contains(out, "FIRST_ENV_e2=v2") || + !strings.Contains(out, "FIRST_ENV_e3=v3=v3") { 
+ c.Fatalf("Incorrect output: %s", out) } - logDone("link - ensure containers hosts files are updated on restart") } diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go index 9bf90f3adcd0b..3b4431d2d2fbc 100644 --- a/integration-cli/docker_cli_login_test.go +++ b/integration-cli/docker_cli_login_test.go @@ -3,10 +3,11 @@ package main import ( "bytes" "os/exec" - "testing" + + "github.com/go-check/check" ) -func TestLoginWithoutTTY(t *testing.T) { +func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { cmd := exec.Command(dockerBinary, "login") // Send to stdin so the process does not get the TTY @@ -14,8 +15,7 @@ func TestLoginWithoutTTY(t *testing.T) { // run the command and block until it's done if err := cmd.Run(); err == nil { - t.Fatal("Expected non nil err when loginning in & TTY not available") + c.Fatal("Expected non nil err when loginning in & TTY not available") } - logDone("login - login without TTY") } diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go index c236ef0859df8..0a3e1af981841 100644 --- a/integration-cli/docker_cli_logs_test.go +++ b/integration-cli/docker_cli_logs_test.go @@ -5,19 +5,19 @@ import ( "os/exec" "regexp" "strings" - "testing" "time" "github.com/docker/docker/pkg/timeutils" + "github.com/go-check/check" ) // This used to work, it test a log of PageSize-1 (gh#4851) -func TestLogsContainerSmallerThanPage(t *testing.T) { +func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { testLen := 32767 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -26,25 +26,24 @@ func TestLogsContainerSmallerThanPage(t *testing.T) { 
logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { - t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) - logDone("logs - logs container running echo smaller than page size") } // Regression test: When going over the PageSize, it used to panic (gh#4851) -func TestLogsContainerBiggerThanPage(t *testing.T) { +func (s *DockerSuite) TestLogsContainerBiggerThanPage(c *check.C) { testLen := 32768 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -53,25 +52,24 @@ func TestLogsContainerBiggerThanPage(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { - t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) - logDone("logs - logs container running echo bigger than page size") } // Regression test: When going much over the PageSize, it used to block (gh#4851) -func TestLogsContainerMuchBiggerThanPage(t *testing.T) { +func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { testLen := 33000 runCmd := 
exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n =; done; echo", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -80,25 +78,24 @@ func TestLogsContainerMuchBiggerThanPage(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } if len(out) != testLen+1 { - t.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) + c.Fatalf("Expected log length of %d, received %d\n", testLen+1, len(out)) } deleteContainer(cleanedContainerID) - logDone("logs - logs container running echo much bigger than page size") } -func TestLogsTimestamps(t *testing.T) { +func (s *DockerSuite) TestLogsTimestamps(c *check.C) { testLen := 100 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -107,13 +104,13 @@ func TestLogsTimestamps(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", "-t", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } lines := strings.Split(out, "\n") if len(lines) != testLen+1 { - t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } ts := 
regexp.MustCompile(`^.* `) @@ -122,26 +119,25 @@ func TestLogsTimestamps(t *testing.T) { if l != "" { _, err := time.Parse(timeutils.RFC3339NanoFixed+" ", ts.FindString(l)) if err != nil { - t.Fatalf("Failed to parse timestamp from %v: %v", l, err) + c.Fatalf("Failed to parse timestamp from %v: %v", l, err) } if l[29] != 'Z' { // ensure we have padded 0's - t.Fatalf("Timestamp isn't padded properly: %s", l) + c.Fatalf("Timestamp isn't padded properly: %s", l) } } } deleteContainer(cleanedContainerID) - logDone("logs - logs with timestamps") } -func TestLogsSeparateStderr(t *testing.T) { +func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { msg := "stderr_log" runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -150,30 +146,29 @@ func TestLogsSeparateStderr(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } if stdout != "" { - t.Fatalf("Expected empty stdout stream, got %v", stdout) + c.Fatalf("Expected empty stdout stream, got %v", stdout) } stderr = strings.TrimSpace(stderr) if stderr != msg { - t.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) + c.Fatalf("Expected %v in stderr stream, got %v", msg, stderr) } deleteContainer(cleanedContainerID) - logDone("logs - separate stderr (without pseudo-tty)") } -func TestLogsStderrInStdout(t *testing.T) { +func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { msg := "stderr_log" runCmd := exec.Command(dockerBinary, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) out, _, _, 
err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -182,30 +177,29 @@ func TestLogsStderrInStdout(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", cleanedContainerID) stdout, stderr, _, err := runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } if stderr != "" { - t.Fatalf("Expected empty stderr stream, got %v", stdout) + c.Fatalf("Expected empty stderr stream, got %v", stdout) } stdout = strings.TrimSpace(stdout) if stdout != msg { - t.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) + c.Fatalf("Expected %v in stdout stream, got %v", msg, stdout) } deleteContainer(cleanedContainerID) - logDone("logs - stderr in stdout (with pseudo-tty)") } -func TestLogsTail(t *testing.T) { +func (s *DockerSuite) TestLogsTail(c *check.C) { testLen := 100 runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -214,49 +208,48 @@ func TestLogsTail(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", "--tail", "5", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } lines := strings.Split(out, "\n") if len(lines) != 6 { - t.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) + c.Fatalf("Expected log %d lines, received %d\n", 6, len(lines)) } logsCmd = exec.Command(dockerBinary, "logs", "--tail", "all", 
cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } lines = strings.Split(out, "\n") if len(lines) != testLen+1 { - t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } logsCmd = exec.Command(dockerBinary, "logs", "--tail", "random", cleanedContainerID) out, _, _, err = runCommandWithStdoutStderr(logsCmd) if err != nil { - t.Fatalf("failed to log container: %s, %v", out, err) + c.Fatalf("failed to log container: %s, %v", out, err) } lines = strings.Split(out, "\n") if len(lines) != testLen+1 { - t.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) + c.Fatalf("Expected log %d lines, received %d\n", testLen+1, len(lines)) } deleteContainer(cleanedContainerID) - logDone("logs - logs tail") } -func TestLogsFollowStopped(t *testing.T) { +func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -264,38 +257,35 @@ func TestLogsFollowStopped(t *testing.T) { logsCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) if err := logsCmd.Start(); err != nil { - t.Fatal(err) + c.Fatal(err) } - c := make(chan struct{}) + errChan := make(chan error) go func() { - if err := logsCmd.Wait(); err != nil { - t.Fatal(err) - } - close(c) + errChan <- logsCmd.Wait() + close(errChan) }() select { - case <-c: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(1 * time.Second): - t.Fatal("Following logs is hanged") + c.Fatal("Following logs is hanged") } deleteContainer(cleanedContainerID) - 
logDone("logs - logs follow stopped container") } // Regression test for #8832 -func TestLogsFollowSlowStdoutConsumer(t *testing.T) { +func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 200000;yes X | head -c 200000`) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("run failed with errors: %s, %v", out, err) + c.Fatalf("run failed with errors: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) stopSlowRead := make(chan bool) @@ -307,31 +297,24 @@ func TestLogsFollowSlowStdoutConsumer(t *testing.T) { logCmd := exec.Command(dockerBinary, "logs", "-f", cleanedContainerID) stdout, err := logCmd.StdoutPipe() - if err != nil { - t.Fatal(err) - } + c.Assert(err, check.IsNil) if err := logCmd.Start(); err != nil { - t.Fatal(err) + c.Fatal(err) } // First read slowly bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) - if err != nil { - t.Fatal(err) - } + c.Assert(err, check.IsNil) // After the container has finished we can continue reading fast bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) - if err != nil { - t.Fatal(err) - } + c.Assert(err, check.IsNil) actual := bytes1 + bytes2 expected := 200000 if actual != expected { - t.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) + c.Fatalf("Invalid bytes read: %d, expected %d", actual, expected) } - logDone("logs - follow slow consumer") } diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go index 35bd378e4ed09..875b6540ab095 100644 --- a/integration-cli/docker_cli_nat_test.go +++ b/integration-cli/docker_cli_nat_test.go @@ -5,32 +5,32 @@ import ( "net" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestNetworkNat(t *testing.T) { - testRequires(t, SameHostDaemon, NativeExecDriver) - defer deleteAllContainers() +func (s 
*DockerSuite) TestNetworkNat(c *check.C) { + testRequires(c, SameHostDaemon, NativeExecDriver) iface, err := net.InterfaceByName("eth0") if err != nil { - t.Skipf("Test not running with `make test`. Interface eth0 not found: %s", err) + c.Skip(fmt.Sprintf("Test not running with `make test`. Interface eth0 not found: %v", err)) } ifaceAddrs, err := iface.Addrs() if err != nil || len(ifaceAddrs) == 0 { - t.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs)) + c.Fatalf("Error retrieving addresses for eth0: %v (%d addresses)", err, len(ifaceAddrs)) } ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) if err != nil { - t.Fatalf("Error retrieving the up for eth0: %s", err) + c.Fatalf("Error retrieving the up for eth0: %s", err) } runCmd := exec.Command(dockerBinary, "run", "-dt", "-p", "8080:8080", "busybox", "nc", "-lp", "8080") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -38,25 +38,24 @@ func TestNetworkNat(t *testing.T) { runCmd = exec.Command(dockerBinary, "run", "busybox", "sh", "-c", fmt.Sprintf("echo hello world | nc -w 30 %s 8080", ifaceIP)) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to retrieve logs for container: %s, %v", out, err) + c.Fatalf("failed to retrieve logs for container: %s, %v", out, err) } out = strings.Trim(out, "\r\n") if expected := "hello world"; out != expected { - t.Fatalf("Unexpected output. Expected: %q, received: %q for iface %s", expected, out, ifaceIP) + c.Fatalf("Unexpected output. 
Expected: %q, received: %q for iface %s", expected, out, ifaceIP) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { - t.Fatalf("failed to kill container: %s, %v", out, err) + c.Fatalf("failed to kill container: %s, %v", out, err) } - logDone("network - make sure nat works through the host") } diff --git a/integration-cli/docker_cli_pause_test.go b/integration-cli/docker_cli_pause_test.go index 41147b206291e..0256fb92bd379 100644 --- a/integration-cli/docker_cli_pause_test.go +++ b/integration-cli/docker_cli_pause_test.go @@ -4,78 +4,76 @@ import ( "fmt" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestPause(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPause(c *check.C) { defer unpauseAllContainers() name := "testeventpause" - out, _, _ := dockerCmd(t, "images", "-q") + out, _ := dockerCmd(c, "images", "-q") image := strings.Split(out, "\n")[0] - dockerCmd(t, "run", "-d", "--name", name, image, "top") + dockerCmd(c, "run", "-d", "--name", name, image, "top") - dockerCmd(t, "pause", name) + dockerCmd(c, "pause", name) pausedContainers, err := getSliceOfPausedContainers() if err != nil { - t.Fatalf("error thrown while checking if containers were paused: %v", err) + c.Fatalf("error thrown while checking if containers were paused: %v", err) } if len(pausedContainers) != 1 { - t.Fatalf("there should be one paused container and not %d", len(pausedContainers)) + c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) } - dockerCmd(t, "unpause", name) + dockerCmd(c, "unpause", name) - eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= 1 { - 
t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } pauseEvent := strings.Fields(events[len(events)-3]) unpauseEvent := strings.Fields(events[len(events)-2]) if pauseEvent[len(pauseEvent)-1] != "pause" { - t.Fatalf("event should be pause, not %#v", pauseEvent) + c.Fatalf("event should be pause, not %#v", pauseEvent) } if unpauseEvent[len(unpauseEvent)-1] != "unpause" { - t.Fatalf("event should be unpause, not %#v", unpauseEvent) + c.Fatalf("event should be unpause, not %#v", unpauseEvent) } - logDone("pause - pause/unpause is logged") } -func TestPauseMultipleContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { defer unpauseAllContainers() containers := []string{ "testpausewithmorecontainers1", "testpausewithmorecontainers2", } - out, _, _ := dockerCmd(t, "images", "-q") + out, _ := dockerCmd(c, "images", "-q") image := strings.Split(out, "\n")[0] for _, name := range containers { - dockerCmd(t, "run", "-d", "--name", name, image, "top") + dockerCmd(c, "run", "-d", "--name", name, image, "top") } - dockerCmd(t, append([]string{"pause"}, containers...)...) + dockerCmd(c, append([]string{"pause"}, containers...)...) pausedContainers, err := getSliceOfPausedContainers() if err != nil { - t.Fatalf("error thrown while checking if containers were paused: %v", err) + c.Fatalf("error thrown while checking if containers were paused: %v", err) } if len(pausedContainers) != len(containers) { - t.Fatalf("there should be %d paused container and not %d", len(containers), len(pausedContainers)) + c.Fatalf("there should be %d paused container and not %d", len(containers), len(pausedContainers)) } - dockerCmd(t, append([]string{"unpause"}, containers...)...) + dockerCmd(c, append([]string{"unpause"}, containers...)...) 
- eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(t).Unix())) + eventsCmd := exec.Command(dockerBinary, "events", "--since=0", fmt.Sprintf("--until=%d", daemonTime(c).Unix())) out, _, _ = runCommandWithOutput(eventsCmd) events := strings.Split(out, "\n") if len(events) <= len(containers)*3-2 { - t.Fatalf("Missing expected event") + c.Fatalf("Missing expected event") } pauseEvents := make([][]string, len(containers)) @@ -87,14 +85,13 @@ func TestPauseMultipleContainers(t *testing.T) { for _, pauseEvent := range pauseEvents { if pauseEvent[len(pauseEvent)-1] != "pause" { - t.Fatalf("event should be pause, not %#v", pauseEvent) + c.Fatalf("event should be pause, not %#v", pauseEvent) } } for _, unpauseEvent := range unpauseEvents { if unpauseEvent[len(unpauseEvent)-1] != "unpause" { - t.Fatalf("event should be unpause, not %#v", unpauseEvent) + c.Fatalf("event should be unpause, not %#v", unpauseEvent) } } - logDone("pause - multi pause/unpause is logged") } diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go index 91c1ee30098e2..f0cb6639648c5 100644 --- a/integration-cli/docker_cli_port_test.go +++ b/integration-cli/docker_cli_port_test.go @@ -1,45 +1,46 @@ package main import ( + "net" "os/exec" "sort" "strings" - "testing" + + "github.com/go-check/check" ) -func TestPortList(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPortList(c *check.C) { // one port runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "port", firstID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { - t.Error("Port list is not correct") + if !assertPortList(c, 
out, []string{"0.0.0.0:9876"}) { + c.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", firstID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{"80/tcp -> 0.0.0.0:9876"}) { - t.Error("Port list is not correct") + if !assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) { + c.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // three port @@ -50,36 +51,36 @@ func TestPortList(t *testing.T) { "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } ID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "port", ID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{"0.0.0.0:9876"}) { - t.Error("Port list is not correct") + if !assertPortList(c, out, []string{"0.0.0.0:9876"}) { + c.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{ + if !assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) { - t.Error("Port list is not correct") + c.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // more and one port mapped to the same container port @@ -91,46 +92,45 @@ func TestPortList(t *testing.T) { "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } ID = strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "port", 
ID, "80") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { - t.Error("Port list is not correct") + if !assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) { + c.Error("Port list is not correct") } runCmd = exec.Command(dockerBinary, "port", ID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - if !assertPortList(t, out, []string{ + if !assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "80/tcp -> 0.0.0.0:9999", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) { - t.Error("Port list is not correct\n", out) + c.Error("Port list is not correct\n", out) } runCmd = exec.Command(dockerBinary, "rm", "-f", ID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - logDone("port - test port list") } -func assertPortList(t *testing.T, out string, expected []string) bool { +func assertPortList(c *check.C, out string, expected []string) bool { //lines := strings.Split(out, "\n") lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != len(expected) { - t.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) + c.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) return false } sort.Strings(lines) @@ -138,10 +138,86 @@ func assertPortList(t *testing.T, out string, expected []string) bool { for i := 0; i < len(expected); i++ { if lines[i] != expected[i] { - t.Error("|" + lines[i] + "!=" + expected[i] + "|") + c.Error("|" + lines[i] + "!=" + expected[i] + "|") return false } } return true } + +func (s *DockerSuite) TestPortHostBinding(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-p", "9876:80", "busybox", + "nc", "-l", "-p", "80") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatal(out, err) + } + firstID := strings.TrimSpace(out) + 
+ runCmd = exec.Command(dockerBinary, "port", firstID, "80") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatal(out, err) + } + + if !assertPortList(c, out, []string{"0.0.0.0:9876"}) { + c.Error("Port list is not correct") + } + + runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + if out, _, err = runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", + "nc", "localhost", "9876") + if out, _, err = runCommandWithOutput(runCmd); err == nil { + c.Error("Port is still bound after the Container is removed") + } +} + +func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--expose", "80", "busybox", + "nc", "-l", "-p", "80") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatal(out, err) + } + firstID := strings.TrimSpace(out) + + runCmd = exec.Command(dockerBinary, "port", firstID, "80") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatal(out, err) + } + + _, exposedPort, err := net.SplitHostPort(out) + + if err != nil { + c.Fatal(out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + runCmd = exec.Command(dockerBinary, "rm", "-f", firstID) + if out, _, err = runCommandWithOutput(runCmd); err != nil { + c.Fatal(out, err) + } + + runCmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", + "nc", "localhost", strings.TrimSpace(exposedPort)) + if out, _, err = runCommandWithOutput(runCmd); err == nil { + c.Error("Port is still bound after the Container is removed") + } +} diff --git 
a/integration-cli/docker_cli_proxy_test.go b/integration-cli/docker_cli_proxy_test.go index b39dd5634d37e..8b55c67d81445 100644 --- a/integration-cli/docker_cli_proxy_test.go +++ b/integration-cli/docker_cli_proxy_test.go @@ -4,30 +4,30 @@ import ( "net" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestCliProxyDisableProxyUnixSock(t *testing.T) { - testRequires(t, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. +func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) { + testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. cmd := exec.Command(dockerBinary, "info") cmd.Env = appendBaseEnv([]string{"HTTP_PROXY=http://127.0.0.1:9999"}) if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - logDone("cli proxy - HTTP_PROXY is not used when connecting to unix sock") } // Can't use localhost here since go has a special case to not use proxy if connecting to localhost -// See http://golang.org/pkg/net/http/#ProxyFromEnvironment -func TestCliProxyProxyTCPSock(t *testing.T) { - testRequires(t, SameHostDaemon) +// See https://golang.org/pkg/net/http/#ProxyFromEnvironment +func (s *DockerDaemonSuite) TestCliProxyProxyTCPSock(c *check.C) { + testRequires(c, SameHostDaemon) // get the IP to use to connect since we can't use localhost addrs, err := net.InterfaceAddrs() if err != nil { - t.Fatal(err) + c.Fatal(err) } var ip string for _, addr := range addrs { @@ -40,25 +40,23 @@ func TestCliProxyProxyTCPSock(t *testing.T) { } if ip == "" { - t.Fatal("could not find ip to connect to") + c.Fatal("could not find ip to connect to") } - d := NewDaemon(t) - if err := d.Start("-H", "tcp://"+ip+":2375"); err != nil { - t.Fatal(err) + if err := s.d.Start("-H", "tcp://"+ip+":2375"); err != nil { + c.Fatal(err) } cmd := exec.Command(dockerBinary, "info") cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} if out, _, err := 
runCommandWithOutput(cmd); err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } // Test with no_proxy cmd.Env = append(cmd.Env, "NO_PROXY="+ip) if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "info")); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - logDone("cli proxy - HTTP_PROXY is used for TCP sock") } diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go index deb426fce624e..bb34575fbaf48 100644 --- a/integration-cli/docker_cli_ps_test.go +++ b/integration-cli/docker_cli_ps_test.go @@ -6,24 +6,24 @@ import ( "reflect" "strconv" "strings" - "testing" "time" + + "github.com/go-check/check" ) -func TestPsListContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsListContainers(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } secondID := strings.TrimSpace(out) @@ -31,53 +31,53 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } thirdID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } fourthID := strings.TrimSpace(out) // make sure the second is running if err := waitRun(secondID); err != nil { - t.Fatalf("waiting for container failed: %v", err) + c.Fatalf("waiting for container failed: %v", err) } // make sure third one is not running runCmd = exec.Command(dockerBinary, "wait", thirdID) if out, _, err = runCommandWithOutput(runCmd); err 
!= nil { - t.Fatal(out, err) + c.Fatal(out, err) } // make sure the forth is running if err := waitRun(fourthID); err != nil { - t.Fatalf("waiting for container failed: %v", err) + c.Fatalf("waiting for container failed: %v", err) } // all runCmd = exec.Command(dockerBinary, "ps", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // running runCmd = exec.Command(dockerBinary, "ps") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, []string{fourthID, secondID, firstID}) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // from here all flag '-a' is ignored @@ -86,156 +86,155 @@ func TestPsListContainers(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected := []string{fourthID, thirdID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "-n=2") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // since runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{fourthID, thirdID, secondID} if 
!assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // before runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{secondID, firstID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "--before", thirdID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // since & before runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{thirdID, secondID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the 
correct order: %s", out) } // since & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{fourthID, thirdID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "-n=2") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // before & limit runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{thirdID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, "ps", "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } // since & before & limit runCmd = exec.Command(dockerBinary, "ps", "--since", firstID, "--before", fourthID, "-n=1", "-a") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } expected = []string{thirdID} if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } runCmd = exec.Command(dockerBinary, 
"ps", "--since", firstID, "--before", fourthID, "-n=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if !assertContainerList(out, expected) { - t.Errorf("Container list is not in the correct order: %s", out) + c.Errorf("Container list is not in the correct order: %s", out) } - logDone("ps - test ps options") } func assertContainerList(out string, expected []string) bool { @@ -255,9 +254,7 @@ func assertContainerList(out string, expected []string) bool { return true } -func TestPsListContainersSize(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestPsListContainersSize(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") runCommandWithOutput(cmd) cmd = exec.Command(dockerBinary, "ps", "-s", "-n=1") @@ -267,18 +264,18 @@ func TestPsListContainersSize(t *testing.T) { baseFoundsize := baseLines[1][baseSizeIndex:] baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) if err != nil { - t.Fatal(err) + c.Fatal(err) } name := "test_size" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id, err := getIDByName(name) if err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd = exec.Command(dockerBinary, "ps", "-s", "-n=1") @@ -290,54 +287,52 @@ func TestPsListContainersSize(t *testing.T) { select { case <-wait: case <-time.After(3 * time.Second): - t.Fatalf("Calling \"docker ps -s\" timed out!") + c.Fatalf("Calling \"docker ps -s\" timed out!") } if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != 2 { - t.Fatalf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines)) + c.Fatalf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines)) } sizeIndex := strings.Index(lines[0], "SIZE") idIndex := 
strings.Index(lines[0], "CONTAINER ID") foundID := lines[1][idIndex : idIndex+12] if foundID != id[:12] { - t.Fatalf("Expected id %s, got %s", id[:12], foundID) + c.Fatalf("Expected id %s, got %s", id[:12], foundID) } expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) foundSize := lines[1][sizeIndex:] if foundSize != expectedSize { - t.Fatalf("Expected size %q, got %q", expectedSize, foundSize) + c.Fatalf("Expected size %q, got %q", expectedSize, foundSize) } - logDone("ps - test ps size") } -func TestPsListContainersFilterStatus(t *testing.T) { +func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { // FIXME: this should test paused, but it makes things hang and its wonky // this is because paused containers can't be controlled by signals - defer deleteAllContainers() // start exited container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) // make sure the exited cintainer is not running runCmd = exec.Command(dockerBinary, "wait", firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // start running container runCmd = exec.Command(dockerBinary, "run", "-itd", "busybox") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } secondID := strings.TrimSpace(out) @@ -345,313 +340,298 @@ func TestPsListContainersFilterStatus(t *testing.T) { runCmd = exec.Command(dockerBinary, "ps", "-q", "--filter=status=exited") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { - t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } runCmd = 
exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=status=running") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != secondID[:12] { - t.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) + c.Fatalf("Expected id %s, got %s for running filter, output: %q", secondID[:12], containerOut, out) } - logDone("ps - test ps filter status") } -func TestPsListContainersFilterID(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { // start container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) // start another container runCmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // filter containers by id runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=id="+firstID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { - t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } - logDone("ps - test ps filter id") } -func TestPsListContainersFilterName(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { // start container runCmd := exec.Command(dockerBinary, "run", "-d", "--name=a_name_to_match", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) // start another container runCmd 
= exec.Command(dockerBinary, "run", "-d", "--name=b_name_to_match", "busybox", "top") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // filter containers by name runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--filter=name=a_name_to_match") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID[:12] { - t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) + c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID[:12], containerOut, out) } - logDone("ps - test ps filter name") } -func TestPsListContainersFilterLabel(t *testing.T) { +func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { // start container runCmd := exec.Command(dockerBinary, "run", "-d", "-l", "match=me", "-l", "second=tag", "busybox") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstID := strings.TrimSpace(out) // start another container runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "match=me too", "busybox") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } secondID := strings.TrimSpace(out) // start third container runCmd = exec.Command(dockerBinary, "run", "-d", "-l", "nomatch=me", "busybox") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } thirdID := strings.TrimSpace(out) // filter containers by exact match runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut := strings.TrimSpace(out) if containerOut != firstID { - t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out) + 
c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out) } // filter containers by two labels runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != firstID { - t.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out) + c.Fatalf("Expected id %s, got %s for exited filter, output: %q", firstID, containerOut, out) } // filter containers by two labels, but expect not found because of AND behavior runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut = strings.TrimSpace(out) if containerOut != "" { - t.Fatalf("Expected nothing, got %s for exited filter, output: %q", containerOut, out) + c.Fatalf("Expected nothing, got %s for exited filter, output: %q", containerOut, out) } // filter containers by exact key runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerOut = strings.TrimSpace(out) if (!strings.Contains(containerOut, firstID) || !strings.Contains(containerOut, secondID)) || strings.Contains(containerOut, thirdID) { - t.Fatalf("Expected ids %s,%s, got %s for exited filter, output: %q", firstID, secondID, containerOut, out) + c.Fatalf("Expected ids %s,%s, got %s for exited filter, output: %q", firstID, secondID, containerOut, out) } - - deleteAllContainers() - - logDone("ps - test ps filter label") } -func TestPsListContainersFilterExited(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsListContainersFilterExited(c 
*check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "top", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "run", "--name", "zero1", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } firstZero, err := getIDByName("zero1") if err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "zero2", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } secondZero, err := getIDByName("zero2") if err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero1", "busybox", "false") if out, _, err := runCommandWithOutput(runCmd); err == nil { - t.Fatal("Should fail.", out, err) + c.Fatal("Should fail.", out, err) } firstNonZero, err := getIDByName("nonzero1") if err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd = exec.Command(dockerBinary, "run", "--name", "nonzero2", "busybox", "false") if out, _, err := runCommandWithOutput(runCmd); err == nil { - t.Fatal("Should fail.", out, err) + c.Fatal("Should fail.", out, err) } secondNonZero, err := getIDByName("nonzero2") if err != nil { - t.Fatal(err) + c.Fatal(err) } // filter containers by exited=0 runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } ids := strings.Split(strings.TrimSpace(out), "\n") if len(ids) != 2 { - t.Fatalf("Should be 2 zero exited containers got %d: %s", len(ids), out) + c.Fatalf("Should be 2 zero exited containers got %d: %s", len(ids), out) } if ids[0] != secondZero { - t.Fatalf("First in list should be %q, got %q", secondZero, ids[0]) + c.Fatalf("First in list should be %q, got %q", secondZero, ids[0]) } if ids[1] != 
firstZero { - t.Fatalf("Second in list should be %q, got %q", firstZero, ids[1]) + c.Fatalf("Second in list should be %q, got %q", firstZero, ids[1]) } runCmd = exec.Command(dockerBinary, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } ids = strings.Split(strings.TrimSpace(out), "\n") if len(ids) != 2 { - t.Fatalf("Should be 2 zero exited containerst got %d", len(ids)) + c.Fatalf("Should be 2 zero exited containers got %d", len(ids)) } if ids[0] != secondNonZero { - t.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0]) + c.Fatalf("First in list should be %q, got %q", secondNonZero, ids[0]) } if ids[1] != firstNonZero { - t.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1]) + c.Fatalf("Second in list should be %q, got %q", firstNonZero, ids[1]) } - logDone("ps - test ps filter exited") } -func TestPsRightTagName(t *testing.T) { +func (s *DockerSuite) TestPsRightTagName(c *check.C) { tag := "asybox:shmatest" - defer deleteAllContainers() - defer deleteImages(tag) if out, err := exec.Command(dockerBinary, "tag", "busybox", tag).CombinedOutput(); err != nil { - t.Fatalf("Failed to tag image: %s, out: %q", err, out) + c.Fatalf("Failed to tag image: %s, out: %q", err, out) } var id1 string if out, err := exec.Command(dockerBinary, "run", "-d", "busybox", "top").CombinedOutput(); err != nil { - t.Fatalf("Failed to run container: %s, out: %q", err, out) + c.Fatalf("Failed to run container: %s, out: %q", err, out) } else { id1 = strings.TrimSpace(string(out)) } var id2 string if out, err := exec.Command(dockerBinary, "run", "-d", tag, "top").CombinedOutput(); err != nil { - t.Fatalf("Failed to run container: %s, out: %q", err, out) + c.Fatalf("Failed to run container: %s, out: %q", err, out) } else { id2 = strings.TrimSpace(string(out)) } var imageID string if out, err := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", 
"busybox").CombinedOutput(); err != nil { - t.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) + c.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) } else { imageID = strings.TrimSpace(string(out)) } var id3 string if out, err := exec.Command(dockerBinary, "run", "-d", imageID, "top").CombinedOutput(); err != nil { - t.Fatalf("Failed to run container: %s, out: %q", err, out) + c.Fatalf("Failed to run container: %s, out: %q", err, out) } else { id3 = strings.TrimSpace(string(out)) } out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() if err != nil { - t.Fatalf("Failed to run 'ps': %s, out: %q", err, out) + c.Fatalf("Failed to run 'ps': %s, out: %q", err, out) } lines := strings.Split(strings.TrimSpace(string(out)), "\n") // skip header lines = lines[1:] if len(lines) != 3 { - t.Fatalf("There should be 3 running container, got %d", len(lines)) + c.Fatalf("There should be 3 running container, got %d", len(lines)) } for _, line := range lines { f := strings.Fields(line) switch f[0] { case id1: if f[1] != "busybox" { - t.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1]) + c.Fatalf("Expected %s tag for id %s, got %s", "busybox", id1, f[1]) } case id2: if f[1] != tag { - t.Fatalf("Expected %s tag for id %s, got %s", tag, id2, f[1]) + c.Fatalf("Expected %s tag for id %s, got %s", tag, id2, f[1]) } case id3: if f[1] != imageID { - t.Fatalf("Expected %s imageID for id %s, got %s", tag, id3, f[1]) + c.Fatalf("Expected %s imageID for id %s, got %s", tag, id3, f[1]) } default: - t.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) + c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) } } - logDone("ps - right tags for containers") } -func TestPsLinkedWithNoTrunc(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { if out, err := exec.Command(dockerBinary, "run", "--name=first", "-d", "busybox", 
"top").CombinedOutput(); err != nil { - t.Fatalf("Output: %s, err: %s", out, err) + c.Fatalf("Output: %s, err: %s", out, err) } if out, err := exec.Command(dockerBinary, "run", "--name=second", "--link=first:first", "-d", "busybox", "top").CombinedOutput(); err != nil { - t.Fatalf("Output: %s, err: %s", out, err) + c.Fatalf("Output: %s, err: %s", out, err) } out, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() if err != nil { - t.Fatalf("Output: %s, err: %s", out, err) + c.Fatalf("Output: %s, err: %s", out, err) } lines := strings.Split(strings.TrimSpace(string(out)), "\n") // strip header @@ -663,28 +643,26 @@ func TestPsLinkedWithNoTrunc(t *testing.T) { names = append(names, fields[len(fields)-1]) } if !reflect.DeepEqual(expected, names) { - t.Fatalf("Expected array: %v, got: %v", expected, names) + c.Fatalf("Expected array: %v, got: %v", expected, names) } } -func TestPsGroupPortRange(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { portRange := "3800-3900" out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "ps")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // check that the port range is in the output if !strings.Contains(string(out), portRange) { - t.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out)) + c.Fatalf("docker ps output should have had the port range %q: %s", portRange, string(out)) } - logDone("ps - port range") } diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go index 6e5ddb84083aa..a3ded8f038da8 100644 --- a/integration-cli/docker_cli_pull_test.go +++ b/integration-cli/docker_cli_pull_test.go @@ -4,15 +4,13 @@ import ( "fmt" "os/exec" "strings" - "testing" + + 
"github.com/go-check/check" ) // See issue docker/docker#8141 -func TestPullImageWithAliases(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - defer deleteImages(repoName) repos := []string{} for _, tag := range []string{"recent", "fresh"} { @@ -22,93 +20,91 @@ func TestPullImageWithAliases(t *testing.T) { // Tag and push the same image multiple times. for _, repo := range repos { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", "busybox", repo)); err != nil { - t.Fatalf("Failed to tag image %v: error %v, output %q", repos, err, out) + c.Fatalf("Failed to tag image %v: error %v, output %q", repos, err, out) } - defer deleteImages(repo) if out, err := exec.Command(dockerBinary, "push", repo).CombinedOutput(); err != nil { - t.Fatalf("Failed to push image %v: error %v, output %q", repo, err, string(out)) + c.Fatalf("Failed to push image %v: error %v, output %q", repo, err, string(out)) } } // Clear local images store. args := append([]string{"rmi"}, repos...) if out, err := exec.Command(dockerBinary, args...).CombinedOutput(); err != nil { - t.Fatalf("Failed to clean images: error %v, output %q", err, string(out)) + c.Fatalf("Failed to clean images: error %v, output %q", err, string(out)) } // Pull a single tag and verify it doesn't bring down all aliases. 
pullCmd := exec.Command(dockerBinary, "pull", repos[0]) if out, _, err := runCommandWithOutput(pullCmd); err != nil { - t.Fatalf("Failed to pull %v: error %v, output %q", repoName, err, out) + c.Fatalf("Failed to pull %v: error %v, output %q", repoName, err, out) } if err := exec.Command(dockerBinary, "inspect", repos[0]).Run(); err != nil { - t.Fatalf("Image %v was not pulled down", repos[0]) + c.Fatalf("Image %v was not pulled down", repos[0]) } for _, repo := range repos[1:] { if err := exec.Command(dockerBinary, "inspect", repo).Run(); err == nil { - t.Fatalf("Image %v shouldn't have been pulled down", repo) + c.Fatalf("Image %v shouldn't have been pulled down", repo) } } - - logDone("pull - image with aliases") } // pulling library/hello-world should show verified message -func TestPullVerified(t *testing.T) { +func (s *DockerSuite) TestPullVerified(c *check.C) { + c.Skip("Skipping hub dependent test") + // Image must be pulled from central repository to get verified message // unless keychain is manually updated to contain the daemon's sign key. verifiedName := "hello-world" - defer deleteImages(verifiedName) // pull it expected := "The image you are pulling has been verified" pullCmd := exec.Command(dockerBinary, "pull", verifiedName) if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || !strings.Contains(out, expected) { if err != nil || exitCode != 0 { - t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err) + c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err)) } - t.Fatalf("pulling a verified image failed. expected: %s\ngot: %s, %v", expected, out, err) + c.Fatalf("pulling a verified image failed. 
expected: %s\ngot: %s, %v", expected, out, err) } // pull it again pullCmd = exec.Command(dockerBinary, "pull", verifiedName) if out, exitCode, err := runCommandWithOutput(pullCmd); err != nil || strings.Contains(out, expected) { if err != nil || exitCode != 0 { - t.Skipf("pulling the '%s' image from the registry has failed: %s", verifiedName, err) + c.Skip(fmt.Sprintf("pulling the '%s' image from the registry has failed: %v", verifiedName, err)) } - t.Fatalf("pulling a verified image failed. unexpected verify message\ngot: %s, %v", out, err) + c.Fatalf("pulling a verified image failed. unexpected verify message\ngot: %s, %v", out, err) } - logDone("pull - pull verified") } // pulling an image from the central registry should work -func TestPullImageFromCentralRegistry(t *testing.T) { - testRequires(t, Network) - - defer deleteImages("hello-world") +func (s *DockerSuite) TestPullImageFromCentralRegistry(c *check.C) { + testRequires(c, Network) pullCmd := exec.Command(dockerBinary, "pull", "hello-world") if out, _, err := runCommandWithOutput(pullCmd); err != nil { - t.Fatalf("pulling the hello-world image from the registry has failed: %s, %v", out, err) + c.Fatalf("pulling the hello-world image from the registry has failed: %s, %v", out, err) } - logDone("pull - pull hello-world") } // pulling a non-existing image from the central registry should return a non-zero exit code -func TestPullNonExistingImage(t *testing.T) { - pullCmd := exec.Command(dockerBinary, "pull", "fooblahblah1234") - if out, _, err := runCommandWithOutput(pullCmd); err == nil { - t.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) +func (s *DockerSuite) TestPullNonExistingImage(c *check.C) { + testRequires(c, Network) + + name := "sadfsadfasdf" + pullCmd := exec.Command(dockerBinary, "pull", name) + out, _, err := runCommandWithOutput(pullCmd) + + if err == nil || !strings.Contains(out, fmt.Sprintf("Error: image library/%s:latest not found", name)) { + 
c.Fatalf("expected non-zero exit status when pulling non-existing image: %s", out) } - logDone("pull - pull fooblahblah1234 (non-existing image)") } // pulling an image from the central registry using official names should work // ensure all pulls result in the same image -func TestPullImageOfficialNames(t *testing.T) { - testRequires(t, Network) +func (s *DockerSuite) TestPullImageOfficialNames(c *check.C) { + testRequires(c, Network) names := []string{ "docker.io/hello-world", @@ -121,7 +117,7 @@ func TestPullImageOfficialNames(t *testing.T) { pullCmd := exec.Command(dockerBinary, "pull", name) out, exitCode, err := runCommandWithOutput(pullCmd) if err != nil || exitCode != 0 { - t.Errorf("pulling the '%s' image from the registry has failed: %s", name, err) + c.Errorf("pulling the '%s' image from the registry has failed: %s", name, err) continue } @@ -129,10 +125,28 @@ func TestPullImageOfficialNames(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err = runCommandWithOutput(imagesCmd) if err != nil { - t.Errorf("listing images failed with errors: %v", err) + c.Errorf("listing images failed with errors: %v", err) } else if strings.Contains(out, name) { - t.Errorf("images should not have listed '%s'", name) + c.Errorf("images should not have listed '%s'", name) } } - logDone("pull - pull official names") +} + +func (s *DockerSuite) TestPullScratchNotAllowed(c *check.C) { + testRequires(c, Network) + + pullCmd := exec.Command(dockerBinary, "pull", "scratch") + out, exitCode, err := runCommandWithOutput(pullCmd) + if err == nil { + c.Fatal("expected pull of scratch to fail, but it didn't") + } + if exitCode != 1 { + c.Fatalf("pulling scratch expected exit code 1, got %d", exitCode) + } + if strings.Contains(out, "Pulling repository scratch") { + c.Fatalf("pulling scratch should not have begun: %s", out) + } + if !strings.Contains(out, "'scratch' is a reserved name") { + c.Fatalf("unexpected output pulling scratch: %s", out) + } } diff --git 
a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go index f1274ba706e55..69a05ed821c78 100644 --- a/integration-cli/docker_cli_push_test.go +++ b/integration-cli/docker_cli_push_test.go @@ -6,153 +6,135 @@ import ( "os" "os/exec" "strings" - "testing" "time" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" + "github.com/go-check/check" ) // pulling an image from the central registry should work -func TestPushBusyboxImage(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) // tag the image to upload it to the private registry tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatalf("image tagging failed: %s, %v", out, err) + c.Fatalf("image tagging failed: %s, %v", out, err) } - defer deleteImages(repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err != nil { - t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } - logDone("push - busybox to private registry") } // pushing an image without a prefix should throw an error -func TestPushUnprefixedRepo(t *testing.T) { +func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { pushCmd := exec.Command(dockerBinary, "push", "busybox") if out, _, err := runCommandWithOutput(pushCmd); err == nil { - t.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) + c.Fatalf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out) } - logDone("push - unprefixed busybox repo must not pass") } -func TestPushUntagged(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { repoName := 
fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) expected := "Repository does not exist" pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err == nil { - t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out) + c.Fatalf("pushing the image to the private registry should have failed: output %q", out) } else if !strings.Contains(out, expected) { - t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) + c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) } - logDone("push - untagged image") } -func TestPushBadTag(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) expected := "does not exist" pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err == nil { - t.Fatalf("pushing the image to the private registry should have failed: outuput %q", out) + c.Fatalf("pushing the image to the private registry should have failed: output %q", out) } else if !strings.Contains(out, expected) { - t.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) + c.Fatalf("pushing the image failed with an unexpected message: expected %q, got %q", expected, out) } - logDone("push - image with bad tag") } -func TestPushMultipleTags(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) - // tag the image to upload it tot he private registry + // tag the image and upload it to the private registry tagCmd1 := 
exec.Command(dockerBinary, "tag", "busybox", repoTag1) if out, _, err := runCommandWithOutput(tagCmd1); err != nil { - t.Fatalf("image tagging failed: %s, %v", out, err) + c.Fatalf("image tagging failed: %s, %v", out, err) } - defer deleteImages(repoTag1) tagCmd2 := exec.Command(dockerBinary, "tag", "busybox", repoTag2) if out, _, err := runCommandWithOutput(tagCmd2); err != nil { - t.Fatalf("image tagging failed: %s, %v", out, err) + c.Fatalf("image tagging failed: %s, %v", out, err) } - defer deleteImages(repoTag2) pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := runCommandWithOutput(pushCmd); err != nil { - t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } - logDone("push - multiple tags to private registry") } -func TestPushInterrupt(t *testing.T) { - defer setupRegistry(t)() - +func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - // tag the image to upload it tot he private registry - tagCmd := exec.Command(dockerBinary, "tag", "busybox", repoName) - if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatalf("image tagging failed: %s, %v", out, err) + // tag the image and upload it to the private registry + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", "busybox", repoName)); err != nil { + c.Fatalf("image tagging failed: %s, %v", out, err) } - defer deleteImages(repoName) pushCmd := exec.Command(dockerBinary, "push", repoName) if err := pushCmd.Start(); err != nil { - t.Fatalf("Failed to start pushing to private registry: %v", err) + c.Fatalf("Failed to start pushing to private registry: %v", err) } // Interrupt push (yes, we have no idea at what point it will get killed). 
time.Sleep(200 * time.Millisecond) if err := pushCmd.Process.Kill(); err != nil { - t.Fatalf("Failed to kill push process: %v", err) + c.Fatalf("Failed to kill push process: %v", err) } - // Try agin - pushCmd = exec.Command(dockerBinary, "push", repoName) - if err := pushCmd.Start(); err != nil { - t.Fatalf("Failed to start pushing to private registry: %v", err) + if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repoName)); err == nil { + str := string(out) + if !strings.Contains(str, "already in progress") { + c.Fatalf("Push should be continued on daemon side, but seems ok: %v, %s", err, out) + } + } + // now wait until all this pushes will complete + // if it failed with timeout - there would be some error, + // so no logic about it here + for exec.Command(dockerBinary, "push", repoName).Run() != nil { } - - logDone("push - interrupted") } -func TestPushEmptyLayer(t *testing.T) { - defer setupRegistry(t)() +func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) emptyTarball, err := ioutil.TempFile("", "empty_tarball") if err != nil { - t.Fatalf("Unable to create test file: %v", err) + c.Fatalf("Unable to create test file: %v", err) } tw := tar.NewWriter(emptyTarball) err = tw.Close() if err != nil { - t.Fatalf("Error creating empty tarball: %v", err) + c.Fatalf("Error creating empty tarball: %v", err) } freader, err := os.Open(emptyTarball.Name()) if err != nil { - t.Fatalf("Could not open test tarball: %v", err) + c.Fatalf("Could not open test tarball: %v", err) } importCmd := exec.Command(dockerBinary, "import", "-", repoName) importCmd.Stdin = freader out, _, err := runCommandWithOutput(importCmd) if err != nil { - t.Errorf("import failed with errors: %v, output: %q", err, out) + c.Errorf("import failed with errors: %v, output: %q", err, out) } // Now verify we can push it pushCmd := exec.Command(dockerBinary, "push", repoName) if out, _, err := 
runCommandWithOutput(pushCmd); err != nil { - t.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) + c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err) } - logDone("push - empty layer config to private registry") } diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go index ed24d971d5de6..156ea6eeb3065 100644 --- a/integration-cli/docker_cli_rename_test.go +++ b/integration-cli/docker_cli_rename_test.go @@ -3,16 +3,16 @@ package main import ( "os/exec" "strings" - "testing" -) -func TestRenameStoppedContainer(t *testing.T) { - defer deleteAllContainers() + "github.com/docker/docker/pkg/stringid" + "github.com/go-check/check" +) +func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -20,102 +20,93 @@ func TestRenameStoppedContainer(t *testing.T) { runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } name, err := inspectField(cleanedContainerID, "Name") - runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") + newName := "new_name" + stringid.GenerateRandomID() + runCmd = exec.Command(dockerBinary, "rename", "first_name", newName) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } name, err = inspectField(cleanedContainerID, "Name") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if name != "/new_name" { - t.Fatal("Failed to rename container ", name) + if name != "/"+newName { + c.Fatal("Failed to rename container ", name) } - logDone("rename - stopped container") } -func TestRenameRunningContainer(t *testing.T) { - defer 
deleteAllContainers() - +func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } + newName := "new_name" + stringid.GenerateRandomID() cleanedContainerID := strings.TrimSpace(out) - runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") + runCmd = exec.Command(dockerBinary, "rename", "first_name", newName) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } name, err := inspectField(cleanedContainerID, "Name") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if name != "/new_name" { - t.Fatal("Failed to rename container ") + if name != "/"+newName { + c.Fatal("Failed to rename container ") } - - logDone("rename - running container") } -func TestRenameCheckNames(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRenameCheckNames(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "first_name", "-d", "busybox", "sh") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } - runCmd = exec.Command(dockerBinary, "rename", "first_name", "new_name") + newName := "new_name" + stringid.GenerateRandomID() + runCmd = exec.Command(dockerBinary, "rename", "first_name", newName) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } - name, err := inspectField("new_name", "Name") + name, err := inspectField(newName, "Name") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if name != "/new_name" { - t.Fatal("Failed to rename container ") + if name != "/"+newName { + c.Fatal("Failed to rename container ") } name, err = inspectField("first_name", "Name") if err == nil && !strings.Contains(err.Error(), "No such image or container: first_name") { - t.Fatal(err) + c.Fatal(err) } - - 
logDone("rename - old name released") } -func TestRenameInvalidName(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRenameInvalidName(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "myname", "-d", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } runCmd = exec.Command(dockerBinary, "rename", "myname", "new:invalid") if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Invalid container name") { - t.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err) + c.Fatalf("Renaming container to invalid name should have failed: %s\n%v", out, err) } runCmd = exec.Command(dockerBinary, "ps", "-a") if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "myname") { - t.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err) + c.Fatalf("Output of docker ps should have included 'myname': %s\n%v", out, err) } - - logDone("rename - invalid container name") } diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go index dde450dcd0515..2b9d5e2323511 100644 --- a/integration-cli/docker_cli_restart_test.go +++ b/integration-cli/docker_cli_restart_test.go @@ -3,61 +3,59 @@ package main import ( "os/exec" "strings" - "testing" "time" + + "github.com/go-check/check" ) -func TestRestartStoppedContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "foobar") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) runCmd = exec.Command(dockerBinary, "wait", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = 
exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "foobar\n" { - t.Errorf("container should've printed 'foobar'") + c.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "foobar\nfoobar\n" { - t.Errorf("container should've printed 'foobar' twice") + c.Errorf("container should've printed 'foobar' twice") } - logDone("restart - echo foobar for stopped container") } -func TestRestartRunningContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -67,41 +65,39 @@ func TestRestartRunningContainer(t *testing.T) { runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "foobar\n" { - t.Errorf("container should've printed 'foobar'") + c.Errorf("container should've printed 'foobar'") } runCmd = exec.Command(dockerBinary, "restart", "-t", "1", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "logs", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } time.Sleep(1 * time.Second) if out != 
"foobar\nfoobar\n" { - t.Errorf("container should've printed 'foobar' twice") + c.Errorf("container should've printed 'foobar' twice") } - logDone("restart - echo foobar for running container") } // Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. -func TestRestartWithVolumes(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "-v", "/test", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -109,148 +105,139 @@ func TestRestartWithVolumes(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out = strings.Trim(out, " \n\r"); out != "1" { - t.Errorf("expect 1 volume received %s", out) + c.Errorf("expect 1 volume received %s", out) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumes, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(volumes, err) + c.Fatal(volumes, err) } runCmd = exec.Command(dockerBinary, "restart", cleanedContainerID) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ len .Volumes }}", cleanedContainerID) out, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out = strings.Trim(out, " \n\r"); out != "1" { - t.Errorf("expect 1 volume after restart received %s", out) + c.Errorf("expect 1 volume after restart received %s", out) } runCmd = exec.Command(dockerBinary, "inspect", "--format", "{{ .Volumes }}", cleanedContainerID) volumesAfterRestart, _, err := 
runCommandWithOutput(runCmd) if err != nil { - t.Fatal(volumesAfterRestart, err) + c.Fatal(volumesAfterRestart, err) } if volumes != volumesAfterRestart { volumes = strings.Trim(volumes, " \n\r") volumesAfterRestart = strings.Trim(volumesAfterRestart, " \n\r") - t.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart) + c.Errorf("expected volume path: %s Actual path: %s", volumes, volumesAfterRestart) } - logDone("restart - does not create a new volume on restart") } -func TestRestartPolicyNO(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--restart=no", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if name != "no" { - t.Fatalf("Container restart policy name is %s, expected %s", name, "no") + c.Fatalf("Container restart policy name is %s, expected %s", name, "no") } - logDone("restart - recording restart policy name for --restart=no") } -func TestRestartPolicyAlways(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--restart=always", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if name != "always" { - t.Fatalf("Container restart policy name is %s, expected %s", name, "always") + c.Fatalf("Container restart policy name is %s, expected %s", name, "always") } MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") if err != nil { - t.Fatal(err) + 
c.Fatal(err) } // MaximumRetryCount=0 if the restart policy is always if MaximumRetryCount != "0" { - t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "0") + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "0") } - logDone("restart - recording restart policy name for --restart=always") } -func TestRestartPolicyOnFailure(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:1", "busybox", "false") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(string(out)) name, err := inspectField(id, "HostConfig.RestartPolicy.Name") if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if name != "on-failure" { - t.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure") + c.Fatalf("Container restart policy name is %s, expected %s", name, "on-failure") } - logDone("restart - recording restart policy name for --restart=on-failure") } // a good container with --restart=on-failure:3 // MaximumRetryCount!=0; RestartCount=0 -func TestContainerRestartwithGoodContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestContainerRestartwithGoodContainer(c *check.C) { out, err := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:3", "busybox", "true").CombinedOutput() if err != nil { - t.Fatal(string(out), err) + c.Fatal(string(out), err) } id := strings.TrimSpace(string(out)) if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil { - t.Fatal(err) + c.Fatal(err) } count, err := inspectField(id, "RestartCount") if err != nil { - t.Fatal(err) + c.Fatal(err) } if count != "0" { - t.Fatalf("Container was restarted %s times, expected %d", count, 0) + c.Fatalf("Container was restarted %s times, expected %d", count, 0) } 
MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") if err != nil { - t.Fatal(err) + c.Fatal(err) } if MaximumRetryCount != "3" { - t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") } - logDone("restart - for a good container with restart policy, MaximumRetryCount is not 0 and RestartCount is 0") } diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go index d01b36d45d2d4..b8d1b843d1502 100644 --- a/integration-cli/docker_cli_rm_test.go +++ b/integration-cli/docker_cli_rm_test.go @@ -1,97 +1,82 @@ package main import ( + "net/http" "os" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestRmContainerWithRemovedVolume(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "--name", "losemyvolumes", "-v", "/tmp/testing:/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := os.Remove("/tmp/testing"); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "rm", "-v", "losemyvolumes") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - logDone("rm - removed volume") } -func TestRmContainerWithVolume(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "foo", "-v", "/srv", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "rm", "-v", "foo") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("rm - volume") } -func TestRmRunningContainer(t *testing.T) 
{ - defer deleteAllContainers() +func (s *DockerSuite) TestRmRunningContainer(c *check.C) { - createRunningContainer(t, "foo") + createRunningContainer(c, "foo") // Test cannot remove running container cmd := exec.Command(dockerBinary, "rm", "foo") if _, err := runCommand(cmd); err == nil { - t.Fatalf("Expected error, can't rm a running container") + c.Fatalf("Expected error, can't rm a running container") } - logDone("rm - running container") } -func TestRmRunningContainerCheckError409(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmRunningContainerCheckError409(c *check.C) { - createRunningContainer(t, "foo") + createRunningContainer(c, "foo") endpoint := "/containers/foo" - _, err := sockRequest("DELETE", endpoint, nil) - - if err == nil { - t.Fatalf("Expected error, can't rm a running container") - } - if !strings.Contains(err.Error(), "409 Conflict") { - t.Fatalf("Expected error to contain '409 Conflict' but found %s", err) - } - - logDone("rm - running container") + status, _, err := sockRequest("DELETE", endpoint, nil) + c.Assert(status, check.Equals, http.StatusConflict) + c.Assert(err, check.IsNil) } -func TestRmForceRemoveRunningContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmForceRemoveRunningContainer(c *check.C) { - createRunningContainer(t, "foo") + createRunningContainer(c, "foo") // Stop then remove with -s cmd := exec.Command(dockerBinary, "rm", "-f", "foo") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("rm - running container with --force=true") } -func TestRmContainerOrphaning(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { dockerfile1 := `FROM busybox:latest ENTRYPOINT ["/bin/true"]` @@ -102,47 +87,44 @@ func TestRmContainerOrphaning(t *testing.T) { // build first dockerfile img1, err := buildImage(img, dockerfile1, true) - defer deleteImages(img1) if err != nil { - t.Fatalf("Could not 
build image %s: %v", img, err) + c.Fatalf("Could not build image %s: %v", img, err) } // run container on first image if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", img)); err != nil { - t.Fatalf("Could not run image %s: %v: %s", img, err, out) + c.Fatalf("Could not run image %s: %v: %s", img, err, out) } // rebuild dockerfile with a small addition at the end if _, err := buildImage(img, dockerfile2, true); err != nil { - t.Fatalf("Could not rebuild image %s: %v", img, err) + c.Fatalf("Could not rebuild image %s: %v", img, err) } // try to remove the image, should error out. if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", img)); err == nil { - t.Fatalf("Expected to error out removing the image, but succeeded: %s", out) + c.Fatalf("Expected to error out removing the image, but succeeded: %s", out) } // check if we deleted the first image out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "images", "-q", "--no-trunc")) if err != nil { - t.Fatalf("%v: %s", err, out) + c.Fatalf("%v: %s", err, out) } if !strings.Contains(out, img1) { - t.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) + c.Fatalf("Orphaned container (could not find %q in docker images): %s", img1, out) } - logDone("rm - container orphaning") } -func TestRmInvalidContainer(t *testing.T) { +func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rm", "unknown")); err == nil { - t.Fatal("Expected error on rm unknown container, got none") + c.Fatal("Expected error on rm unknown container, got none") } else if !strings.Contains(out, "failed to remove one or more containers") { - t.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) + c.Fatalf("Expected output to contain 'failed to remove one or more containers', got %q", out) } - logDone("rm - delete unknown container") } -func createRunningContainer(t 
*testing.T, name string) { +func createRunningContainer(c *check.C, name string) { cmd := exec.Command(dockerBinary, "run", "-dt", "--name", name, "busybox", "top") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } } diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go index 277004d2ecca7..9dc2ee297a8d7 100644 --- a/integration-cli/docker_cli_rmi_test.go +++ b/integration-cli/docker_cli_rmi_test.go @@ -1,19 +1,21 @@ package main import ( + "fmt" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestRmiWithContainerFails(t *testing.T) { +func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { errSubstr := "is using it" // create a container runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %s, %v", out, err) + c.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -22,86 +24,125 @@ func TestRmiWithContainerFails(t *testing.T) { runCmd = exec.Command(dockerBinary, "rmi", "busybox") out, _, err = runCommandWithOutput(runCmd) if err == nil { - t.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) + c.Fatalf("Container %q is using image, should not be able to rmi: %q", cleanedContainerID, out) } if !strings.Contains(out, errSubstr) { - t.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) + c.Fatalf("Container %q is using image, error message should contain %q: %v", cleanedContainerID, errSubstr, out) } // make sure it didn't delete the busybox name - images, _, _ := dockerCmd(t, "images") + images, _ := dockerCmd(c, "images") if !strings.Contains(images, "busybox") { - t.Fatalf("The name 'busybox' should not have been removed from images: %q", images) + c.Fatalf("The name 'busybox' should not have been 
removed from images: %q", images) } deleteContainer(cleanedContainerID) - logDone("rmi - container using image while rmi, should not remove image name") } -func TestRmiTag(t *testing.T) { - imagesBefore, _, _ := dockerCmd(t, "images", "-a") - dockerCmd(t, "tag", "busybox", "utest:tag1") - dockerCmd(t, "tag", "busybox", "utest/docker:tag2") - dockerCmd(t, "tag", "busybox", "utest:5000/docker:tag3") +func (s *DockerSuite) TestRmiTag(c *check.C) { + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox", "utest:tag1") + dockerCmd(c, "tag", "busybox", "utest/docker:tag2") + dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") { - imagesAfter, _, _ := dockerCmd(t, "images", "-a") + imagesAfter, _ := dockerCmd(c, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+3 { - t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - dockerCmd(t, "rmi", "utest/docker:tag2") + dockerCmd(c, "rmi", "utest/docker:tag2") { - imagesAfter, _, _ := dockerCmd(t, "images", "-a") + imagesAfter, _ := dockerCmd(c, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+2 { - t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - dockerCmd(t, "rmi", "utest:5000/docker:tag3") + dockerCmd(c, "rmi", "utest:5000/docker:tag3") { - imagesAfter, _, _ := dockerCmd(t, "images", "-a") + imagesAfter, _ := dockerCmd(c, "images", "-a") if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+1 { - t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) } } - dockerCmd(t, "rmi", "utest:tag1") + dockerCmd(c, "rmi", "utest:tag1") { - imagesAfter, _, _ := dockerCmd(t, "images", "-a") + imagesAfter, _ := dockerCmd(c, "images", "-a") if strings.Count(imagesAfter, 
"\n") != strings.Count(imagesBefore, "\n")+0 { - t.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + c.Fatalf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter) + } + + } +} + +func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") + out, _, err := runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("failed to create a container:%s, %v", out, err) + } + containerID := strings.TrimSpace(out) + runCmd = exec.Command(dockerBinary, "commit", containerID, "busybox-test") + out, _, err = runCommandWithOutput(runCmd) + if err != nil { + c.Fatalf("failed to commit a new busybox-test:%s, %v", out, err) + } + + imagesBefore, _ := dockerCmd(c, "images", "-a") + dockerCmd(c, "tag", "busybox-test", "utest:tag1") + dockerCmd(c, "tag", "busybox-test", "utest:tag2") + dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") + dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Count(imagesAfter, "\n") != strings.Count(imagesBefore, "\n")+4 { + c.Fatalf("tag busybox to create 4 more images with same imageID; docker images shows: %q\n", imagesAfter) + } + } + out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox-test") + imgID := strings.TrimSpace(out) + + // first checkout without force it fails + runCmd = exec.Command(dockerBinary, "rmi", imgID) + out, _, err = runCommandWithOutput(runCmd) + if err == nil || !strings.Contains(out, fmt.Sprintf("Conflict, cannot delete image %s because it is tagged in multiple repositories, use -f to force", imgID)) { + c.Fatalf("rmi tagged in multiple repos should have failed without force:%s, %v", out, err) + } + + dockerCmd(c, "rmi", "-f", imgID) + { + imagesAfter, _ := dockerCmd(c, "images", "-a") + if strings.Contains(imagesAfter, imgID[:12]) { + c.Fatalf("rmi -f %s failed, image still exists: %q\n\n", imgID, imagesAfter) } } - 
logDone("rmi - tag,rmi - tagging the same images multiple times then removing tags") } -func TestRmiTagWithExistingContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { container := "test-delete-tag" newtag := "busybox:newtag" bb := "busybox:latest" if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "tag", bb, newtag)); err != nil { - t.Fatalf("Could not tag busybox: %v: %s", err, out) + c.Fatalf("Could not tag busybox: %v: %s", err, out) } if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", container, bb, "/bin/true")); err != nil { - t.Fatalf("Could not run busybox: %v: %s", err, out) + c.Fatalf("Could not run busybox: %v: %s", err, out) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", newtag)) if err != nil { - t.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) + c.Fatalf("Could not remove tag %s: %v: %s", newtag, err, out) } if d := strings.Count(out, "Untagged: "); d != 1 { - t.Fatalf("Expected 1 untagged entry got %d: %q", d, out) + c.Fatalf("Expected 1 untagged entry got %d: %q", d, out) } - logDone("rmi - delete tag with existing containers") } -func TestRmiForceWithExistingContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { image := "busybox-clone" @@ -110,64 +151,60 @@ func TestRmiForceWithExistingContainers(t *testing.T) { MAINTAINER foo`) if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatalf("Could not build %s: %s, %v", image, out, err) + c.Fatalf("Could not build %s: %s, %v", image, out, err) } if out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "test-force-rmi", image, "/bin/true")); err != nil { - t.Fatalf("Could not run container: %s, %v", out, err) + c.Fatalf("Could not run container: %s, %v", out, err) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "rmi", 
"-f", image)) if err != nil { - t.Fatalf("Could not remove image %s: %s, %v", image, out, err) + c.Fatalf("Could not remove image %s: %s, %v", image, out, err) } - logDone("rmi - force delete with existing containers") } -func TestRmiWithMultipleRepositories(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { newRepo := "127.0.0.1:5000/busybox" oldRepo := "busybox" newTag := "busybox:test" cmd := exec.Command(dockerBinary, "tag", oldRepo, newRepo) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("Could not tag busybox: %v: %s", err, out) + c.Fatalf("Could not tag busybox: %v: %s", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "test", oldRepo, "touch", "/home/abcd") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %s", err, out) + c.Fatalf("failed to run container: %v, output: %s", err, out) } cmd = exec.Command(dockerBinary, "commit", "test", newTag) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to commit container: %v, output: %s", err, out) + c.Fatalf("failed to commit container: %v, output: %s", err, out) } cmd = exec.Command(dockerBinary, "rmi", newTag) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to remove image: %v, output: %s", err, out) + c.Fatalf("failed to remove image: %v, output: %s", err, out) } if !strings.Contains(out, "Untagged: "+newTag) { - t.Fatalf("Could not remove image %s: %s, %v", newTag, out, err) + c.Fatalf("Could not remove image %s: %s, %v", newTag, out, err) } - logDone("rmi - delete a image which its dependency tagged to multiple repositories success") } -func TestRmiBlank(t *testing.T) { +func (s *DockerSuite) TestRmiBlank(c *check.C) { // try to delete a blank image name runCmd := exec.Command(dockerBinary, "rmi", "") out, _, err := runCommandWithOutput(runCmd) if err == nil { - t.Fatal("Should have failed to delete '' 
image") + c.Fatal("Should have failed to delete '' image") } if strings.Contains(out, "No such image") { - t.Fatalf("Wrong error message generated: %s", out) + c.Fatalf("Wrong error message generated: %s", out) } - logDone("rmi - blank image name") } diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go index a5d1e3e07024b..0cf5c31eeeac9 100644 --- a/integration-cli/docker_cli_run_test.go +++ b/integration-cli/docker_cli_run_test.go @@ -16,687 +16,593 @@ import ( "strconv" "strings" "sync" - "testing" "time" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/resolvconf" + "github.com/go-check/check" ) // "test123" should be printed by docker run -func TestRunEchoStdout(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunEchoStdout(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "busybox", "echo", "test123") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test123\n" { - t.Errorf("container should've printed 'test123'") + c.Fatalf("container should've printed 'test123'") } - - logDone("run - echo test123") } // "test" should be printed -func TestRunEchoStdoutWithMemoryLimit(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunEchoStdoutWithMemoryLimit(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.Trim(out, "\r\n") if expected := "test"; out != expected { - t.Errorf("container should've printed %q but printed %q", expected, out) - + c.Fatalf("container should've printed %q but printed %q", expected, out) } - - logDone("run - echo with memory 
limit") } // should run without memory swap -func TestRunWithoutMemoryswapLimit(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-m", "16m", "--memory-swap", "-1", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to run container, output: %q", out) + c.Fatalf("failed to run container, output: %q", out) } - - logDone("run - without memory swap limit") } // "test" should be printed -func TestRunEchoStdoutWitCPULimit(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunEchoStdoutWitCPULimit(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { - t.Errorf("container should've printed 'test'") + c.Errorf("container should've printed 'test'") } - - logDone("run - echo with CPU limit") } // "test" should be printed -func TestRunEchoStdoutWithCPUAndMemoryLimit(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunEchoStdoutWithCPUAndMemoryLimit(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-c", "1000", "-m", "16m", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { - t.Errorf("container should've printed 'test', got %q instead", out) + c.Errorf("container should've printed 'test', got %q instead", out) } - - logDone("run - echo with CPU and memory limit") } // "test" should be printed -func TestRunEchoNamedContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) 
TestRunEchoStdoutWitCPUQuota(c *check.C) { + runCmd := exec.Command(dockerBinary, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "echo", "test") + out, _, _, err := runCommandWithStdoutStderr(runCmd) + if err != nil { + c.Fatalf("failed to run container: %v, output: %q", err, out) + } + out = strings.TrimSpace(out) + if strings.Contains(out, "Your kernel does not support CPU cfs quota") { + c.Skip("Your kernel does not support CPU cfs quota, skip this test") + } + if out != "test" { + c.Errorf("container should've printed 'test'") + } + cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.CpuQuota}}", "test") + out, _, err = runCommandWithOutput(cmd) + if err != nil { + c.Fatalf("failed to inspect container: %s, %v", out, err) + } + out = strings.TrimSpace(out) + if out != "8000" { + c.Errorf("setting the CPU CFS quota failed") + } +} + +// "test" should be printed +func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if out != "test\n" { - t.Errorf("container should've printed 'test'") + c.Errorf("container should've printed 'test'") } if err := deleteContainer("testfoonamedcontainer"); err != nil { - t.Errorf("failed to remove the named container: %v", err) + c.Errorf("failed to remove the named container: %v", err) } - - logDone("run - echo with named container") } // docker run should not leak file descriptors -func TestRunLeakyFileDescriptors(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "busybox", "ls", "-C", "/proc/self/fd") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to 
run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory if out != "0 1 2 3\n" { - t.Errorf("container should've printed '0 1 2 3', not: %s", out) + c.Errorf("container should've printed '0 1 2 3', not: %s", out) } - - logDone("run - check file descriptor leakage") } // it should be possible to lookup Google DNS // this will fail when Internet access is unavailable -func TestRunLookupGoogleDns(t *testing.T) { - testRequires(t, Network) - defer deleteAllContainers() +func (s *DockerSuite) TestRunLookupGoogleDns(c *check.C) { + testRequires(c, Network) out, _, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "run", "busybox", "nslookup", "google.com")) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } - - logDone("run - nslookup google.com") } // the exit code should be 0 // some versions of lxc might make this test fail -func TestRunExitCodeZero(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "busybox", "true") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Errorf("container should've exited with exit code 0: %s, %v", out, err) + c.Errorf("container should've exited with exit code 0: %s, %v", out, err) } - - logDone("run - exit with 0") } // the exit code should be 1 // some versions of lxc might make this test fail -func TestRunExitCodeOne(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "busybox", "false") exitCode, err := runCommand(runCmd) if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { - t.Fatal(err) + c.Fatal(err) } if exitCode != 1 { - 
t.Errorf("container should've exited with exit code 1") + c.Errorf("container should've exited with exit code 1") } - - logDone("run - exit with 1") } // it should be possible to pipe in data via stdin to a process running in a container // some versions of lxc might make this test fail -func TestRunStdinPipe(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunStdinPipe(c *check.C) { runCmd := exec.Command("bash", "-c", `echo "blahblah" | docker run -i -a stdin busybox cat`) out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) if out, _, err := runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("out should've been a container id: %s %v", out, err) + c.Fatalf("out should've been a container id: %s %v", out, err) } waitCmd := exec.Command(dockerBinary, "wait", out) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } logsCmd := exec.Command(dockerBinary, "logs", out) logsOut, _, err := runCommandWithOutput(logsCmd) if err != nil { - t.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err) + c.Fatalf("error thrown while trying to get container logs: %s, %v", logsOut, err) } containerLogs := strings.TrimSpace(logsOut) if containerLogs != "blahblah" { - t.Errorf("logs didn't print the container's logs %s", containerLogs) + c.Errorf("logs didn't print the container's logs %s", containerLogs) } rmCmd := exec.Command(dockerBinary, "rm", out) if out, _, err = runCommandWithOutput(rmCmd); err != nil { - t.Fatalf("rm failed to remove container: %s, %v", out, err) + c.Fatalf("rm failed to remove container: %s, %v", out, err) } 
- - logDone("run - pipe in with -i -a stdin") } // the container's ID should be printed when starting a container in detached mode -func TestRunDetachedContainerIDPrinting(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) inspectCmd := exec.Command(dockerBinary, "inspect", out) if inspectOut, _, err := runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("out should've been a container id: %s %v", inspectOut, err) + c.Fatalf("out should've been a container id: %s %v", inspectOut, err) } waitCmd := exec.Command(dockerBinary, "wait", out) if waitOut, _, err := runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) + c.Fatalf("error thrown while waiting for container: %s, %v", waitOut, err) } rmCmd := exec.Command(dockerBinary, "rm", out) rmOut, _, err := runCommandWithOutput(rmCmd) if err != nil { - t.Fatalf("rm failed to remove container: %s, %v", rmOut, err) + c.Fatalf("rm failed to remove container: %s, %v", rmOut, err) } rmOut = strings.TrimSpace(rmOut) if rmOut != out { - t.Errorf("rm didn't print the container ID %s %s", out, rmOut) + c.Errorf("rm didn't print the container ID %s %s", out, rmOut) } - - logDone("run - print container ID in detached mode") } // the working directory should be set correctly -func TestRunWorkingDirectory(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-w", "/root", "busybox", "pwd") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: 
%q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) if out != "/root" { - t.Errorf("-w failed to set working directory") + c.Errorf("-w failed to set working directory") } runCmd = exec.Command(dockerBinary, "run", "--workdir", "/root", "busybox", "pwd") out, _, _, err = runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.TrimSpace(out) if out != "/root" { - t.Errorf("--workdir failed to set working directory") + c.Errorf("--workdir failed to set working directory") } - - logDone("run - run with working directory set by -w/--workdir") } // pinging Google's DNS resolver should fail when we disable the networking -func TestRunWithoutNetworking(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 1 { - t.Fatal(out, err) + c.Fatal(out, err) } if exitCode != 1 { - t.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } runCmd = exec.Command(dockerBinary, "run", "-n=false", "busybox", "ping", "-c", "1", "8.8.8.8") out, _, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 1 { - t.Fatal(out, err) + c.Fatal(out, err) } if exitCode != 1 { - t.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") + c.Errorf("-n=false should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") } - - logDone("run - disable networking with --net=none/-n=false") } //test --link use container name to link target -func TestRunLinksContainerWithContainerName(t *testing.T) { 
- defer deleteAllContainers() - +func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "--name", "parent", "busybox") out, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", "parent") ip, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to inspect container: %v, output: %q", err, ip) + c.Fatalf("failed to inspect container: %v, output: %q", err, ip) } ip = strings.TrimSpace(ip) cmd = exec.Command(dockerBinary, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, ip+" test") { - t.Fatalf("use a container name to link target failed") + c.Fatalf("use a container name to link target failed") } - - logDone("run - use a container name to link target work") } //test --link use container id to link target -func TestRunLinksContainerWithContainerId(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-i", "-t", "-d", "busybox") cID, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, cID) + c.Fatalf("failed to run container: %v, output: %q", err, cID) } cID = strings.TrimSpace(cID) cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.NetworkSettings.IPAddress}}", cID) ip, _, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatalf("faild to inspect container: %v, output: %q", err, ip) + c.Fatalf("failed to inspect container: %v, 
output: %q", err, ip) } ip = strings.TrimSpace(ip) cmd = exec.Command(dockerBinary, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, ip+" test") { - t.Fatalf("use a container id to link target failed") + c.Fatalf("use a container id to link target failed") } - - logDone("run - use a container id to link target work") } -func TestRunLinkToContainerNetMode(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "test", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "-d", "--link=parent:parent", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "-d", "--link=child:child", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to 
run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } - - logDone("run - link to a container which net mode is container success") } -func TestRunModeNetContainerHostname(t *testing.T) { - testRequires(t, ExecSupport) - defer deleteAllContainers() +func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { + testRequires(c, ExecSupport) cmd := exec.Command(dockerBinary, "run", "-i", "-d", "--name", "parent", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "exec", "parent", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to exec command: %v, output: %q", err, out) + c.Fatalf("failed to exec command: %v, output: %q", err, out) } cmd = exec.Command(dockerBinary, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") out1, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out1) + c.Fatalf("failed to run container: %v, output: %q", err, out1) } if out1 != out { - t.Fatal("containers with shared net namespace should have same hostname") + c.Fatal("containers with shared net namespace should have same hostname") } - - logDone("run - containers with shared net namespace have same hostname") } // Regression test for #4741 -func TestRunWithVolumesAsFiles(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithVolumesAsFiles(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/etc/hosts:/target-file", "busybox", "true") out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { - t.Fatal("1", out, stderr, err) + c.Fatal("1", out, stderr, err) } runCmd = exec.Command(dockerBinary, "run", 
"--volumes-from", "test-data", "busybox", "cat", "/target-file") out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { - t.Fatal("2", out, stderr, err) + c.Fatal("2", out, stderr, err) } - - logDone("run - regression test for #4741 - volumes from as files") } // Regression test for #4979 -func TestRunWithVolumesFromExited(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { - t.Fatal("1", out, stderr, err) + c.Fatal("1", out, stderr, err) } runCmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") out, stderr, exitCode, err = runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { - t.Fatal("2", out, stderr, err) + c.Fatal("2", out, stderr, err) } - - logDone("run - regression test for #4979 - volumes-from on exited container") } // Volume path is a symlink which also exists on the host, and the host side is a file not a dir // But the volume call is just a normal volume, not a bind mount -func TestRunCreateVolumesInSymlinkDir(t *testing.T) { - testRequires(t, SameHostDaemon) - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { + testRequires(c, SameHostDaemon) + testRequires(c, NativeExecDriver) name := "test-volume-symlink" dir, err := ioutil.TempDir("", name) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(dir) f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) if err != nil { - t.Fatal(err) + c.Fatal(err) } f.Close() dockerFile := fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) if _, err := buildImage(name, dockerFile, false); 
err != nil { - t.Fatal(err) + c.Fatal(err) } - defer deleteImages(name) - if out, _, err := dockerCmd(t, "run", "-v", "/test/test", name); err != nil { - t.Fatal(err, out) + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-v", "/test/test", name)) + if err != nil { + c.Fatalf("Failed with errors: %s, %v", out, err) } - - logDone("run - create volume in symlink directory") } // Regression test for #4830 -func TestRunWithRelativePath(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithRelativePath(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-v", "tmp:/other-tmp", "busybox", "true") if _, _, _, err := runCommandWithStdoutStderr(runCmd); err == nil { - t.Fatalf("relative path should result in an error") + c.Fatalf("relative path should result in an error") } - - logDone("run - volume with relative path") } -func TestRunVolumesMountedAsReadonly(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile") if code, err := runCommand(cmd); err == nil || code == 0 { - t.Fatalf("run should fail because volume is ro: exit code %d", code) + c.Fatalf("run should fail because volume is ro: exit code %d", code) } - - logDone("run - volumes as readonly mount") } -func TestRunVolumesFromInReadonlyMode(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunVolumesFromInReadonlyMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:ro", "busybox", "touch", "/test/file") if code, err := runCommand(cmd); err == nil || code == 0 { - t.Fatalf("run should fail because volume is ro: exit code %d", code) + c.Fatalf("run should fail because volume is 
ro: exit code %d", code) } - - logDone("run - volumes from as readonly mount") } // Regression test for #1201 -func TestRunVolumesFromInReadWriteMode(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err) + c.Fatalf("running --volumes-from parent:rw failed with output: %q\nerror: %v", out, err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:bar", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "invalid mode for volumes-from: bar") { - t.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out) + c.Fatalf("running --volumes-from foo:bar should have failed with invalid mount mode: %q", out) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "busybox", "touch", "/test/file") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err) + c.Fatalf("running --volumes-from parent failed with output: %q\nerror: %v", out, err) } - - logDone("run - volumes from as read write mount") } -func TestVolumesFromGetsProperMode(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test:/test:ro", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - // Expect this "rw" mode to be be ignored since the inheritted 
volume is "ro" + // Expect this "rw" mode to be ignored since the inherited volume is "ro" cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent:rw", "busybox", "touch", "/test/file") if _, err := runCommand(cmd); err == nil { - t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") } cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/test:/test:ro", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } // Expect this to be read-only since both are "ro" cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent2:ro", "busybox", "touch", "/test/file") if _, err := runCommand(cmd); err == nil { - t.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") + c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") } - - logDone("run - volumes from ignores `rw` if inherrited volume is `ro`") } // Test for GH#10618 -func TestRunNoDupVolumes(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { mountstr1 := randomUnixTmpDirPath("test1") + ":/someplace" mountstr2 := randomUnixTmpDirPath("test2") + ":/someplace" cmd := exec.Command(dockerBinary, "run", "-v", mountstr1, "-v", mountstr2, "busybox", "true") if out, _, err := runCommandWithOutput(cmd); err == nil { - t.Fatal("Expected error about duplicate volume definitions") + c.Fatal("Expected error about duplicate volume definitions") } else { if !strings.Contains(out, "Duplicate volume") { - t.Fatalf("Expected 'duplicate volume' error, got %v", err) + c.Fatalf("Expected 'duplicate volume' error, got %v", err) } } - - logDone("run - don't allow multiple (bind) volumes on the same container target") } // Test for #1351 -func TestRunApplyVolumesFromBeforeVolumes(t *testing.T) { - defer deleteAllContainers() - +func (s
*DockerSuite) TestRunApplyVolumesFromBeforeVolumes(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "parent", "-v", "/test", "busybox", "touch", "/test/foo") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent", "-v", "/test", "busybox", "cat", "/test/foo") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - logDone("run - volumes from mounted first") } -func TestRunMultipleVolumesFrom(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--name", "parent1", "-v", "/test", "busybox", "touch", "/test/foo") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--name", "parent2", "-v", "/other", "busybox", "touch", "/other/bar") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - - logDone("run - multiple volumes from") } // this tests verifies the ID format for the container -func TestRunVerifyContainerID(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, exit, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } if exit != 0 { - t.Fatalf("expected exit code 0 received %d", exit) + c.Fatalf("expected exit code 0 received %d", exit) } match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) if err != nil { - t.Fatal(err) + c.Fatal(err) } if !match { - t.Fatalf("Invalid container ID: %s", out) + c.Fatalf("Invalid container 
ID: %s", out) } - - logDone("run - verify container ID") } // Test that creating a container with a volume doesn't crash. Regression test for #995. -func TestRunCreateVolume(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCreateVolume(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-v", "/var/lib/data", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - - logDone("run - create docker managed volume") } // Test that creating a volume with a symlink in its path works correctly. Test for #5152. // Note that this bug happens only with symlinks with a target that starts with '/'. -func TestRunCreateVolumeWithSymlink(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { image := "docker-test-createvolumewithsymlink" - defer deleteImages(image) buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox @@ -704,43 +610,38 @@ func TestRunCreateVolumeWithSymlink(t *testing.T) { buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { - t.Fatalf("could not build '%s': %v", image, err) + c.Fatalf("could not build '%s': %v", image, err) } cmd := exec.Command(dockerBinary, "run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { - t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } var volPath string cmd = exec.Command(dockerBinary, "inspect", "-f", "{{range .Volumes}}{{.}}{{end}}", "test-createvolumewithsymlink") volPath, exitCode, err = runCommandWithOutput(cmd) if err != nil || exitCode != 0 { - t.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode) + c.Fatalf("[inspect] err: %v, exitcode: %d", err, exitCode) } cmd = exec.Command(dockerBinary, "rm", "-v", 
"test-createvolumewithsymlink") exitCode, err = runCommand(cmd) if err != nil || exitCode != 0 { - t.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) + c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) } f, err := os.Open(volPath) defer f.Close() if !os.IsNotExist(err) { - t.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) + c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) } - - logDone("run - create volume with symlink") } // Tests that a volume path that has a symlink exists in a container mounting it with `--volumes-from`. -func TestRunVolumesFromSymlinkPath(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { name := "docker-test-volumesfromsymlinkpath" - defer deleteImages(name) buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") buildCmd.Stdin = strings.NewReader(`FROM busybox @@ -749,166 +650,131 @@ func TestRunVolumesFromSymlinkPath(t *testing.T) { buildCmd.Dir = workingDirectory err := buildCmd.Run() if err != nil { - t.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) + c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) } cmd := exec.Command(dockerBinary, "run", "--name", "test-volumesfromsymlinkpath", name) exitCode, err := runCommand(cmd) if err != nil || exitCode != 0 { - t.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) + c.Fatalf("[run] (volume) err: %v, exitcode: %d", err, exitCode) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls /foo | grep -q bar") exitCode, err = runCommand(cmd) if err != nil || exitCode != 0 { - t.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) + c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) } - - logDone("run - volumes-from symlink path") } -func TestRunExitCode(t *testing.T) { - defer deleteAllContainers() - +func (s 
*DockerSuite) TestRunExitCode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "/bin/sh", "-c", "exit 72") exit, err := runCommand(cmd) if err == nil { - t.Fatal("should not have a non nil error") + c.Fatal("should not have a non nil error") } if exit != 72 { - t.Fatalf("expected exit code 72 received %d", exit) + c.Fatalf("expected exit code 72 received %d", exit) } - - logDone("run - correct exit code") } -func TestRunUserDefaultsToRoot(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserDefaultsToRoot(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root)") { - t.Fatalf("expected root user got %s", out) + c.Fatalf("expected root user got %s", out) } - - logDone("run - default user") } -func TestRunUserByName(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserByName(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-u", "root", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root)") { - t.Fatalf("expected root user got %s", out) + c.Fatalf("expected root user got %s", out) } - - logDone("run - user by name") } -func TestRunUserByID(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserByID(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-u", "1", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { - t.Fatalf("expected daemon user got %s", out) + c.Fatalf("expected daemon user got %s", out) } - - logDone("run - user by id") } -func TestRunUserByIDBig(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserByIDBig(c *check.C) 
{ cmd := exec.Command(dockerBinary, "run", "-u", "2147483648", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal("No error, but must be.", out) + c.Fatal("No error, but must be.", out) } if !strings.Contains(out, "Uids and gids must be in range") { - t.Fatalf("expected error about uids range, got %s", out) + c.Fatalf("expected error about uids range, got %s", out) } - - logDone("run - user by id, id too big") } -func TestRunUserByIDNegative(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-u", "-1", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal("No error, but must be.", out) + c.Fatal("No error, but must be.", out) } if !strings.Contains(out, "Uids and gids must be in range") { - t.Fatalf("expected error about uids range, got %s", out) + c.Fatalf("expected error about uids range, got %s", out) } - - logDone("run - user by id, id negative") } -func TestRunUserByIDZero(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-u", "0", "busybox", "id") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { - t.Fatalf("expected daemon user got %s", out) + c.Fatalf("expected daemon user got %s", out) } - - logDone("run - user by id, zero uid") } -func TestRunUserNotFound(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUserNotFound(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-u", "notme", "busybox", "id") _, err := runCommand(cmd) if err == nil { - t.Fatal("unknown user should cause container to fail") + c.Fatal("unknown user should cause container to fail") } - - logDone("run - user not found") } -func TestRunTwoConcurrentContainers(t *testing.T) { - defer 
deleteAllContainers() - +func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { group := sync.WaitGroup{} group.Add(2) + errChan := make(chan error, 2) for i := 0; i < 2; i++ { go func() { defer group.Done() cmd := exec.Command(dockerBinary, "run", "busybox", "sleep", "2") - if _, err := runCommand(cmd); err != nil { - t.Fatal(err) - } + _, err := runCommand(cmd) + errChan <- err }() } group.Wait() + close(errChan) - logDone("run - two concurrent containers") + for err := range errChan { + c.Assert(err, check.IsNil) + } } -func TestRunEnvironment(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunEnvironment(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") cmd.Env = append(os.Environ(), "TRUE=false", @@ -917,7 +783,7 @@ func TestRunEnvironment(t *testing.T) { out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") @@ -941,29 +807,26 @@ func TestRunEnvironment(t *testing.T) { } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { - t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { - t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } - - logDone("run - verify environment") } -func TestRunEnvironmentErase(t *testing.T) { +func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { // Test to make sure that when we use -e on env vars that are // not set in our local env that they're removed (if present) in // the container - defer deleteAllContainers() cmd := 
exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") cmd.Env = appendBaseEnv([]string{}) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") @@ -981,28 +844,25 @@ func TestRunEnvironmentErase(t *testing.T) { } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { - t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { - t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } - - logDone("run - verify environment erase") } -func TestRunEnvironmentOverride(t *testing.T) { +func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { // Test to make sure that when we use -e on env vars that are // already in the env that we're overriding them - defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") cmd.Env = appendBaseEnv([]string{"HOSTNAME=bar"}) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actualEnvLxc := strings.Split(strings.TrimSpace(out), "\n") @@ -1021,60 +881,47 @@ func TestRunEnvironmentOverride(t *testing.T) { } sort.Strings(goodEnv) if len(goodEnv) != len(actualEnv) { - t.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) + c.Fatalf("Wrong environment: should be %d variables, not: %q\n", len(goodEnv), strings.Join(actualEnv, ", ")) } for i := range goodEnv { if actualEnv[i] != goodEnv[i] { - t.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) + c.Fatalf("Wrong 
environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) } } - - logDone("run - verify environment override") } -func TestRunContainerNetwork(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - - logDone("run - test container network via ping") } // Issue #4681 -func TestRunLoopbackWhenNetworkDisabled(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - - logDone("run - test container loopback when networking disabled") } -func TestRunNetHostNotAllowedWithLinks(t *testing.T) { - defer deleteAllContainers() - - _, _, err := dockerCmd(t, "run", "--name", "linked", "busybox", "true") +func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name", "linked", "busybox", "true")) + if err != nil { + c.Fatalf("Failed with errors: %s, %v", out, err) + } cmd := exec.Command(dockerBinary, "run", "--net=host", "--link", "linked:linked", "busybox", "true") _, _, err = runCommandWithOutput(cmd) if err == nil { - t.Fatal("Expected error") + c.Fatal("Expected error") } - - logDone("run - don't allow --net=host to be used with links") } -func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } var ( @@ 
-1089,14 +936,12 @@ func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { } if count != 1 { - t.Fatalf("Wrong interface count in container %d", count) + c.Fatalf("Wrong interface count in container %d", count) } if !strings.HasPrefix(out, "1: lo") { - t.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) + c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) } - - logDone("run - test loopback only exists when networking disabled") } // #7851 hostname outside container shows FQDN, inside only shortname @@ -1104,293 +949,220 @@ func TestRunLoopbackOnlyExistsWhenNetworkingDisabled(t *testing.T) { // and use "--net=host" (as the original issue submitter did), as the same // codepath is executed with "docker run -h ". Both were manually // tested, but this testcase takes the simpler path of using "run -h .." -func TestRunFullHostnameSet(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-h", "foo.bar.baz", "busybox", "hostname") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { - t.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) + c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) } - - logDone("run - test fully qualified hostname set with -h") } -func TestRunPrivilegedCanMknod(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - 
test privileged can mknod") } -func TestRunUnPrivilegedCanMknod(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUnPrivilegedCanMknod(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - test un-privileged can mknod") } -func TestRunCapDropInvalid(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-drop=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } - - logDone("run - test --cap-drop=CHPASS invalid") } -func TestRunCapDropCannotMknod(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected output not ok received %s", actual) + c.Fatalf("expected output not ok received %s", actual) } - - logDone("run - test --cap-drop=MKNOD cannot mknod") } -func TestRunCapDropCannotMknodLowerCase(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected 
output not ok received %s", actual) + c.Fatalf("expected output not ok received %s", actual) } - - logDone("run - test --cap-drop=mknod cannot mknod lowercase") } -func TestRunCapDropALLCannotMknod(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected output not ok received %s", actual) + c.Fatalf("expected output not ok received %s", actual) } - - logDone("run - test --cap-drop=ALL cannot mknod") } -func TestRunCapDropALLAddMknodCanMknod(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - test --cap-drop=ALL --cap-add=MKNOD can mknod") } -func TestRunCapAddInvalid(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-add=CHPASS", "busybox", "ls") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } - - logDone("run - test --cap-add=CHPASS invalid") } -func TestRunCapAddCanDownInterface(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { cmd := exec.Command(dockerBinary, "run", 
"--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - test --cap-add=NET_ADMIN can set eth0 down") } -func TestRunCapAddALLCanDownInterface(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - test --cap-add=ALL can set eth0 down") } -func TestRunCapAddALLDropNetAdminCanDownInterface(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected output not ok received %s", actual) + c.Fatalf("expected output not ok received %s", actual) } - - logDone("run - test --cap-add=ALL --cap-drop=NET_ADMIN cannot set eth0 down") } -func TestRunPrivilegedCanMount(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") out, _, err := runCommandWithOutput(cmd) if err 
!= nil { - t.Fatal(err) + c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != "ok" { - t.Fatalf("expected output ok received %s", actual) + c.Fatalf("expected output ok received %s", actual) } - - logDone("run - test privileged can mount") } -func TestRunUnPrivilegedCannotMount(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUnPrivilegedCannotMount(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual == "ok" { - t.Fatalf("expected output not ok received %s", actual) + c.Fatalf("expected output not ok received %s", actual) } - - logDone("run - test un-privileged cannot mount") } -func TestRunSysNotWritableInNonPrivilegedContainers(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err == nil || code == 0 { - t.Fatal("sys should not be writable in a non privileged container") + c.Fatal("sys should not be writable in a non privileged container") } - - logDone("run - sys not writable in non privileged container") } -func TestRunSysWritableInPrivilegedContainers(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/sys/kernel/profiling") if code, err := runCommand(cmd); err != nil || code != 0 { - t.Fatalf("sys should be writable in privileged container") + c.Fatalf("sys should be writable in privileged container") } - - logDone("run - sys writable in privileged container") } -func TestRunProcNotWritableInNonPrivilegedContainers(t *testing.T) { - defer deleteAllContainers() - 
+func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "touch", "/proc/sysrq-trigger") if code, err := runCommand(cmd); err == nil || code == 0 { - t.Fatal("proc should not be writable in a non privileged container") + c.Fatal("proc should not be writable in a non privileged container") } - - logDone("run - proc not writable in non privileged container") } -func TestRunProcWritableInPrivilegedContainers(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "touch", "/proc/sysrq-trigger") if code, err := runCommand(cmd); err != nil || code != 0 { - t.Fatalf("proc should be writable in privileged container") + c.Fatalf("proc should be writable in privileged container") } - logDone("run - proc writable in privileged container") } -func TestRunWithCpuset(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithCpuset(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cpuset", "0", "busybox", "true") if code, err := runCommand(cmd); err != nil || code != 0 { - t.Fatalf("container should run successfuly with cpuset of 0: %s", err) + c.Fatalf("container should run successfully with cpuset of 0: %s", err) } - - logDone("run - cpuset 0") } -func TestRunWithCpusetCpus(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--cpuset-cpus", "0", "busybox", "true") if code, err := runCommand(cmd); err != nil || code != 0 { - t.Fatalf("container should run successfuly with cpuset-cpus of 0: %s", err) + c.Fatalf("container should run successfully with cpuset-cpus of 0: %s", err) } - - logDone("run - cpuset-cpus 0") } -func TestRunDeviceNumbers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunWithCpusetMems(c 
*check.C) { + cmd := exec.Command(dockerBinary, "run", "--cpuset-mems", "0", "busybox", "true") + if code, err := runCommand(cmd); err != nil || code != 0 { + c.Fatalf("container should run successfully with cpuset-mems of 0: %s", err) + } +} +func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "ls -l /dev/null") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } deviceLineFields := strings.Fields(out) deviceLineFields[6] = "" @@ -1399,137 +1171,107 @@ func TestRunDeviceNumbers(t *testing.T) { expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} if !(reflect.DeepEqual(deviceLineFields, expected)) { - t.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) + c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) } - - logDone("run - test device numbers") } -func TestRunThatCharacterDevicesActLikeCharacterDevices(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { cmd := exec.Command(dockerBinary, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { - t.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) + c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) } - - logDone("run - test that character devices work.") } -func TestRunUnprivilegedWithChroot(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { cmd := exec.Command(dockerBinary, "run", 
"busybox", "chroot", "/", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } - - logDone("run - unprivileged with chroot") } -func TestRunAddingOptionalDevices(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { - t.Fatalf("expected output /dev/nulo, received %s", actual) + c.Fatalf("expected output /dev/nulo, received %s", actual) } - - logDone("run - test --device argument") } -func TestRunModeHostname(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunModeHostname(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { - t.Fatalf("expected 'testhostname', but says: %q", actual) + c.Fatalf("expected 'testhostname', but says: %q", actual) } cmd = exec.Command(dockerBinary, "run", "--net=host", "busybox", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } hostname, err := os.Hostname() if err != nil { - t.Fatal(err) + c.Fatal(err) } if actual := strings.Trim(out, "\r\n"); actual != hostname { - t.Fatalf("expected %q, but says: %q", hostname, actual) + c.Fatalf("expected %q, but says: %q", hostname, actual) } - - logDone("run - hostname and several network modes") } -func TestRunRootWorkdir(t *testing.T) { - defer deleteAllContainers() - - s, _, err := dockerCmd(t, "run", "--workdir", "/", "busybox", "pwd") +func (s 
*DockerSuite) TestRunRootWorkdir(c *check.C) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--workdir", "/", "busybox", "pwd")) if err != nil { - t.Fatal(s, err) + c.Fatalf("Failed with errors: %s, %v", out, err) } - if s != "/\n" { - t.Fatalf("pwd returned %q (expected /\\n)", s) + if out != "/\n" { + c.Fatalf("pwd returned %q (expected /\\n)", s) } - - logDone("run - workdir /") } -func TestRunAllowBindMountingRoot(t *testing.T) { - defer deleteAllContainers() - - s, _, err := dockerCmd(t, "run", "-v", "/:/host", "busybox", "ls", "/host") +func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { + out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-v", "/:/host", "busybox", "ls", "/host")) if err != nil { - t.Fatal(s, err) + c.Fatalf("Failed with errors: %s, %v", out, err) } - - logDone("run - bind mount / as volume") } -func TestRunDisallowBindMountingRootToRoot(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-v", "/:/", "busybox", "ls", "/host") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - logDone("run - bind mount /:/ as volume should not work") } // Verify that a container gets default DNS when only localhost resolvers exist -func TestRunDnsDefaultOptions(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) { + testRequires(c, SameHostDaemon) // preserve original resolv.conf for restoring after test origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { - t.Fatalf("/etc/resolv.conf does not exist") + c.Fatalf("/etc/resolv.conf does not exist") } // defer restored original conf defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } }() @@ -1538,14 
+1280,14 @@ func TestRunDnsDefaultOptions(t *testing.T) { // GetNameservers(), leading to a replacement of nameservers with the default set tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf") actual, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, actual) + c.Fatal(err, actual) } // check that the actual defaults are appended to the commented out @@ -1553,54 +1295,47 @@ func TestRunDnsDefaultOptions(t *testing.T) { // NOTE: if we ever change the defaults from google dns, this will break expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4" if actual != expected { - t.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) + c.Fatalf("expected resolv.conf be: %q, but was: %q", expected, actual) } - - logDone("run - dns default options") } -func TestRunDnsOptions(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunDnsOptions(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") out, stderr, _, err := runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } // The client will get a warning on stderr when setting DNS to a localhost address; verify this: if !strings.Contains(stderr, "Localhost DNS setting") { - t.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) + c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) } actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) if actual != "nameserver 127.0.0.1 search mydomain" { - t.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: %q", actual) + c.Fatalf("expected 'nameserver 127.0.0.1 search mydomain', but says: 
%q", actual) } cmd = exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "--dns-search=.", "busybox", "cat", "/etc/resolv.conf") out, _, _, err = runCommandWithStdoutStderr(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) if actual != "nameserver 127.0.0.1" { - t.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual) + c.Fatalf("expected 'nameserver 127.0.0.1', but says: %q", actual) } - - logDone("run - dns options") } -func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { + testRequires(c, SameHostDaemon) origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { - t.Fatalf("/etc/resolv.conf does not exist") + c.Fatalf("/etc/resolv.conf does not exist") } hostNamservers := resolvconf.GetNameservers(origResolvConf) @@ -1609,58 +1344,58 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { var out string cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") if out, _, _, err = runCommandWithStdoutStderr(cmd); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actualNameservers := resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "127.0.0.1" { - t.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) + c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) } actualSearch := resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { - t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: %q", 
actualSearch[i], hostSearch[i]) + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } cmd = exec.Command(dockerBinary, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") if out, _, err = runCommandWithOutput(cmd); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actualNameservers := resolvconf.GetNameservers([]byte(out)) if len(actualNameservers) != len(hostNamservers) { - t.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) + c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNamservers), len(actualNameservers)) } for i := range actualNameservers { if actualNameservers[i] != hostNamservers[i] { - t.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i]) + c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNamservers[i]) } } if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { - t.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) + c.Fatalf("expected 'mydomain', but says: %q", string(actualSearch[0])) } // test with file tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } // put the old resolvconf back defer func() { if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } }() resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") if os.IsNotExist(err) { - t.Fatalf("/etc/resolv.conf does not exist") + c.Fatalf("/etc/resolv.conf does not exist") } hostNamservers = resolvconf.GetNameservers(resolvConf) @@ -1669,35 +1404,32 @@ func TestRunDnsOptionsBasedOnHostResolvConf(t *testing.T) { cmd = exec.Command(dockerBinary, "run", "busybox", "cat", "/etc/resolv.conf") if out, _, err = runCommandWithOutput(cmd); err != nil { - 
t.Fatal(err, out) + c.Fatal(err, out) } if actualNameservers = resolvconf.GetNameservers([]byte(out)); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { - t.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) + c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) } actualSearch = resolvconf.GetSearchDomains([]byte(out)) if len(actualSearch) != len(hostSearch) { - t.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) + c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) } for i := range actualSearch { if actualSearch[i] != hostSearch[i] { - t.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) + c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) } } - defer deleteAllContainers() - - logDone("run - dns options based on host resolv.conf") } // Test the file watch notifier on docker host's /etc/resolv.conf // A go-routine is responsible for auto-updating containers which are // stopped and have an unmodified copy of resolv.conf, as well as // marking running containers as requiring an update on next restart -func TestRunResolvconfUpdater(t *testing.T) { +func (s *DockerSuite) TestRunResolvconfUpdater(c *check.C) { // Because overlay doesn't support inotify properly, we need to skip // this test if the docker daemon has Storage Driver == overlay - testRequires(t, SameHostDaemon, NotOverlay) + testRequires(c, SameHostDaemon, NotOverlay) tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78") tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") @@ -1705,97 +1437,96 @@ func TestRunResolvconfUpdater(t *testing.T) { //take a copy of resolv.conf for restoring after test completes resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } // This test case is meant to test monitoring resolv.conf when it is - // a regular 
file not a bind mount. So we unmount resolv.conf and replace + // a regular file not a bind mounc. So we unmount resolv.conf and replace // it with a file containing the original settings. cmd := exec.Command("umount", "/etc/resolv.conf") if _, err = runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } //cleanup defer func() { - deleteAllContainers() if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } }() //1. test that a non-running container gets an updated resolv.conf cmd = exec.Command(dockerBinary, "run", "--name='first'", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } containerID1, err := getIDByName("first") if err != nil { - t.Fatal(err) + c.Fatal(err) } // replace resolv.conf with our temporary copy bytesResolvConf := []byte(tmpResolvConf) if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second / 2) // check for update in container containerResolv, err := readContainerFile(containerID1, "resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { - t.Fatalf("Stopped container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + c.Fatalf("Stopped container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } //2. 
test that a non-running container does not receive resolv.conf updates // if it modified the container copy of the starting point resolv.conf cmd = exec.Command(dockerBinary, "run", "--name='second'", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") if _, err = runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } containerID2, err := getIDByName("second") if err != nil { - t.Fatal(err) + c.Fatal(err) } containerResolvHashBefore, err := readContainerFile(containerID2, "resolv.conf.hash") if err != nil { - t.Fatal(err) + c.Fatal(err) } //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second / 2) containerResolvHashAfter, err := readContainerFile(containerID2, "resolv.conf.hash") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) { - t.Fatalf("Stopped container with modified resolv.conf should not have been updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) + c.Fatalf("Stopped container with modified resolv.conf should not have been updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) } //3. 
test that a running container's resolv.conf is not modified while running cmd = exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } runningContainerID := strings.TrimSpace(out) containerResolvHashBefore, err = readContainerFile(runningContainerID, "resolv.conf.hash") if err != nil { - t.Fatal(err) + c.Fatal(err) } // replace resolv.conf if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } // make sure the updater has time to run to validate we really aren't @@ -1803,27 +1534,27 @@ func TestRunResolvconfUpdater(t *testing.T) { time.Sleep(time.Second / 2) containerResolvHashAfter, err = readContainerFile(runningContainerID, "resolv.conf.hash") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(containerResolvHashBefore, containerResolvHashAfter) { - t.Fatalf("Running container's resolv.conf should not be updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) + c.Fatalf("Running container's resolv.conf should not be updated; expected hash: %v, new hash: %v", containerResolvHashBefore, containerResolvHashAfter) } //4. test that a running container's resolv.conf is updated upon restart // (the above container is still running..) cmd = exec.Command(dockerBinary, "restart", runningContainerID) if _, err = runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } // check for update in container containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { - t.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) + c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) } //5. 
test that additions of a localhost resolver are cleaned from @@ -1832,7 +1563,7 @@ func TestRunResolvconfUpdater(t *testing.T) { // replace resolv.conf with a localhost-only nameserver copy bytesResolvConf = []byte(tmpLocalhostResolvConf) if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second / 2) @@ -1840,12 +1571,12 @@ func TestRunResolvconfUpdater(t *testing.T) { // after the cleanup of resolv.conf found only a localhost nameserver: containerResolv, err = readContainerFile(containerID1, "resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4" if !bytes.Equal(containerResolv, []byte(expected)) { - t.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) + c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) } //6. 
Test that replacing (as opposed to modifying) resolv.conf triggers an update @@ -1853,194 +1584,170 @@ func TestRunResolvconfUpdater(t *testing.T) { // Restore the original resolv.conf if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } // Run the container so it picks up the old settings cmd = exec.Command(dockerBinary, "run", "--name='third'", "busybox", "true") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } containerID3, err := getIDByName("third") if err != nil { - t.Fatal(err) + c.Fatal(err) } // Create a modified resolv.conf.aside and override resolv.conf with it bytesResolvConf = []byte(tmpResolvConf) if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } time.Sleep(time.Second / 2) // check for update in container containerResolv, err = readContainerFile(containerID3, "resolv.conf") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !bytes.Equal(containerResolv, bytesResolvConf) { - t.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) + c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) } //cleanup, restore original resolv.conf happens in defer func() - logDone("run - resolv.conf updater") } -func TestRunAddHost(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunAddHost(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } actual := strings.Trim(out, "\r\n") if actual != "86.75.30.9\textra" { - t.Fatalf("expected '86.75.30.9\textra', but says: %q", 
actual) + c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) } - - logDone("run - add-host option") } // Regression test for #6983 -func TestRunAttachStdErrOnlyTTYMode(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stderr", "busybox", "true") exitCode, err := runCommand(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } else if exitCode != 0 { - t.Fatalf("Container should have exited with error code 0") + c.Fatalf("Container should have exited with error code 0") } - - logDone("run - Attach stderr only with -t") } // Regression test for #6983 -func TestRunAttachStdOutOnlyTTYMode(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "busybox", "true") exitCode, err := runCommand(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } else if exitCode != 0 { - t.Fatalf("Container should have exited with error code 0") + c.Fatalf("Container should have exited with error code 0") } - - logDone("run - Attach stdout only with -t") } // Regression test for #6983 -func TestRunAttachStdOutAndErrTTYMode(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") exitCode, err := runCommand(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } else if exitCode != 0 { - t.Fatalf("Container should have exited with error code 0") + c.Fatalf("Container should have exited with error code 0") } - - logDone("run - Attach stderr and stdout with -t") } // Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode // but using --attach instead of -a to make sure we read the flag correctly -func TestRunAttachWithDettach(t *testing.T) { - defer 
deleteAllContainers() - +func (s *DockerSuite) TestRunAttachWithDettach(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") _, stderr, _, err := runCommandWithStdoutStderr(cmd) if err == nil { - t.Fatal("Container should have exited with error code different than 0") + c.Fatal("Container should have exited with error code different than 0") } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { - t.Fatal("Should have been returned an error with conflicting options -a and -d") + c.Fatal("Should have been returned an error with conflicting options -a and -d") } - - logDone("run - Attach stdout with -d") } -func TestRunState(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunState(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(out) state, err := inspectField(id, "State.Running") if err != nil { - t.Fatal(err) + c.Fatal(err) } if state != "true" { - t.Fatal("Container state is 'not running'") + c.Fatal("Container state is 'not running'") } pid1, err := inspectField(id, "State.Pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } if pid1 == "0" { - t.Fatal("Container state Pid 0") + c.Fatal("Container state Pid 0") } cmd = exec.Command(dockerBinary, "stop", id) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } state, err = inspectField(id, "State.Running") if err != nil { - t.Fatal(err) + c.Fatal(err) } if state != "false" { - t.Fatal("Container state is 'running'") + c.Fatal("Container state is 'running'") } pid2, err := inspectField(id, "State.Pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } if pid2 == pid1 { - t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) } cmd = exec.Command(dockerBinary, 
"start", id) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } state, err = inspectField(id, "State.Running") if err != nil { - t.Fatal(err) + c.Fatal(err) } if state != "true" { - t.Fatal("Container state is 'not running'") + c.Fatal("Container state is 'not running'") } pid3, err := inspectField(id, "State.Pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } if pid3 == pid1 { - t.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) + c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) } - logDone("run - test container state.") } // Test for #1737 -func TestRunCopyVolumeUidGid(t *testing.T) { +func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) { name := "testrunvolumesuidgid" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd @@ -2048,178 +1755,160 @@ func TestRunCopyVolumeUidGid(t *testing.T) { RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Test that the uid and gid is copied from the image to the volume cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out = strings.TrimSpace(out) if out != "dockerio:dockerio" { - t.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) + c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) } - - logDone("run - copy uid/gid for volume") } // Test for #1582 -func TestRunCopyVolumeContent(t *testing.T) { +func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { name := "testruncopyvolumecontent" - defer deleteImages(name) - defer deleteAllContainers() _, err := buildImage(name, `FROM busybox RUN mkdir -p /hello/local && echo hello > 
/hello/local/world`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Test that the content is copied from the image to the volume cmd := exec.Command(dockerBinary, "run", "--rm", "-v", "/hello", name, "find", "/hello") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { - t.Fatal("Container failed to transfer content to volume") + c.Fatal("Container failed to transfer content to volume") } - logDone("run - copy volume content") } -func TestRunCleanupCmdOnEntrypoint(t *testing.T) { +func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { name := "testrunmdcleanuponentrypoint" - defer deleteImages(name) - defer deleteAllContainers() if _, err := buildImage(name, `FROM busybox ENTRYPOINT ["echo"] CMD ["testingpoint"]`, true); err != nil { - t.Fatal(err) + c.Fatal(err) } runCmd := exec.Command(dockerBinary, "run", "--entrypoint", "whoami", name) out, exit, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("Error: %v, out: %q", err, out) + c.Fatalf("Error: %v, out: %q", err, out) } if exit != 0 { - t.Fatalf("expected exit code 0 received %d, out: %q", exit, out) + c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) } out = strings.TrimSpace(out) if out != "root" { - t.Fatalf("Expected output root, got %q", out) + c.Fatalf("Expected output root, got %q", out) } - logDone("run - cleanup cmd on --entrypoint") } // TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected -func TestRunWorkdirExistsAndIsFile(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-w", "/bin/cat", "busybox") out, exit, err := runCommandWithOutput(runCmd) if !(err != nil && exit == 1 && strings.Contains(out, "Cannot mkdir: /bin/cat is not a directory")) { - t.Fatalf("Docker 
must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err) + c.Fatalf("Docker must complains about making dir, but we got out: %s, exit: %d, err: %s", out, exit, err) } - logDone("run - error on existing file for workdir") } -func TestRunExitOnStdinClose(t *testing.T) { +func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { name := "testrunexitonstdinclose" - defer deleteAllContainers() runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", "/bin/cat") stdin, err := runCmd.StdinPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } stdout, err := runCmd.StdoutPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } if err := runCmd.Start(); err != nil { - t.Fatal(err) + c.Fatal(err) } if _, err := stdin.Write([]byte("hello\n")); err != nil { - t.Fatal(err) + c.Fatal(err) } r := bufio.NewReader(stdout) line, err := r.ReadString('\n') if err != nil { - t.Fatal(err) + c.Fatal(err) } line = strings.TrimSpace(line) if line != "hello" { - t.Fatalf("Output should be 'hello', got '%q'", line) + c.Fatalf("Output should be 'hello', got '%q'", line) } if err := stdin.Close(); err != nil { - t.Fatal(err) + c.Fatal(err) } - finish := make(chan struct{}) + finish := make(chan error) go func() { - if err := runCmd.Wait(); err != nil { - t.Fatal(err) - } + finish <- runCmd.Wait() close(finish) }() select { - case <-finish: + case err := <-finish: + c.Assert(err, check.IsNil) case <-time.After(1 * time.Second): - t.Fatal("docker run failed to exit on stdin close") + c.Fatal("docker run failed to exit on stdin close") } state, err := inspectField(name, "State.Running") - if err != nil { - t.Fatal(err) - } + c.Assert(err, check.IsNil) + if state != "false" { - t.Fatal("Container must be stopped after stdin closing") + c.Fatal("Container must be stopped after stdin closing") } - logDone("run - exit on stdin closing") } // Test for #2267 -func TestRunWriteHostsFileAndNotCommit(t *testing.T) { - defer deleteAllContainers() - +func 
(s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { name := "writehosts" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "test2267") { - t.Fatal("/etc/hosts should contain 'test2267'") + c.Fatal("/etc/hosts should contain 'test2267'") } cmd = exec.Command(dockerBinary, "diff", name) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { - t.Fatal("diff should be empty") + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") } - - logDone("run - write to /etc/hosts and not commited") } -func eqToBaseDiff(out string, t *testing.T) bool { +func eqToBaseDiff(out string, c *check.C) bool { cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "echo", "hello") out1, _, err := runCommandWithOutput(cmd) cID := strings.TrimSpace(out1) cmd = exec.Command(dockerBinary, "diff", cID) baseDiff, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, baseDiff) + c.Fatal(err, baseDiff) } baseArr := strings.Split(baseDiff, "\n") sort.Strings(baseArr) @@ -2243,346 +1932,299 @@ func sliceEq(a, b []string) bool { } // Test for #2267 -func TestRunWriteHostnameFileAndNotCommit(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { name := "writehostname" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "test2267") { - t.Fatal("/etc/hostname should contain 'test2267'") + 
c.Fatal("/etc/hostname should contain 'test2267'") } cmd = exec.Command(dockerBinary, "diff", name) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { - t.Fatal("diff should be empty") + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") } - - logDone("run - write to /etc/hostname and not commited") } // Test for #2267 -func TestRunWriteResolvFileAndNotCommit(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { name := "writeresolv" cmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "test2267") { - t.Fatal("/etc/resolv.conf should contain 'test2267'") + c.Fatal("/etc/resolv.conf should contain 'test2267'") } cmd = exec.Command(dockerBinary, "diff", name) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, t) { - t.Fatal("diff should be empty") + if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { + c.Fatal("diff should be empty") } - - logDone("run - write to /etc/resolv.conf and not commited") } -func TestRunWithBadDevice(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { name := "baddevice" cmd := exec.Command(dockerBinary, "run", "--name", name, "--device", "/etc", "busybox", "true") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatal("Run should fail with bad device") + c.Fatal("Run should fail with bad device") } 
expected := `\"/etc\": not a device node` if !strings.Contains(out, expected) { - t.Fatalf("Output should contain %q, actual out: %q", expected, out) + c.Fatalf("Output should contain %q, actual out: %q", expected, out) } - logDone("run - error with bad device") } -func TestRunEntrypoint(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunEntrypoint(c *check.C) { name := "entrypoint" cmd := exec.Command(dockerBinary, "run", "--name", name, "--entrypoint", "/bin/echo", "busybox", "-n", "foobar") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } expected := "foobar" if out != expected { - t.Fatalf("Output should be %q, actual out: %q", expected, out) + c.Fatalf("Output should be %q, actual out: %q", expected, out) } - logDone("run - entrypoint") } -func TestRunBindMounts(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunBindMounts(c *check.C) { + testRequires(c, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "docker-test-container") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) - writeFile(path.Join(tmpDir, "touch-me"), "", t) + writeFile(path.Join(tmpDir, "touch-me"), "", c) // Test reading from a read-only bind mount cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox", "ls", "/tmp") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if !strings.Contains(out, "touch-me") { - t.Fatal("Container failed to read from bind mount") + c.Fatal("Container failed to read from bind mount") } // test writing to bind mount cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist + 
readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist // test mounting to an illegal destination directory cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") _, err = runCommand(cmd) if err == nil { - t.Fatal("Container bind mounted illegal directory") + c.Fatal("Container bind mounted illegal directory") } // test mount a file cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") _, err = runCommand(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - content := readFile(path.Join(tmpDir, "holla"), t) // Will fail if the file doesn't exist + content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist expected := "yotta" if content != expected { - t.Fatalf("Output should be %q, actual out: %q", expected, content) + c.Fatalf("Output should be %q, actual out: %q", expected, content) } - - logDone("run - bind mounts") } // Ensure that CIDFile gets deleted if it's empty // Perform this test by making `docker run` fail -func TestRunCidFileCleanupIfEmpty(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) tmpCidFile := path.Join(tmpDir, "cid") cmd := exec.Command(dockerBinary, "run", "--cidfile", tmpCidFile, "emptyfs") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatalf("Run without command must fail. out=%s", out) + c.Fatalf("Run without command must fail. out=%s", out) } else if !strings.Contains(out, "No command specified") { - t.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) + c.Fatalf("Run without command failed with wrong output. 
out=%s\nerr=%v", out, err) } if _, err := os.Stat(tmpCidFile); err == nil { - t.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) + c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) } - logDone("run - cleanup empty cidfile on error") } // #2098 - Docker cidFiles only contain short version of the containerId -//sudo docker run --cidfile /tmp/docker_test.cid ubuntu echo "test" // TestRunCidFile tests that run --cidfile returns the longid -func TestRunCidFileCheckIDLength(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { tmpDir, err := ioutil.TempDir("", "TestRunCidFile") if err != nil { - t.Fatal(err) + c.Fatal(err) } tmpCidFile := path.Join(tmpDir, "cid") defer os.RemoveAll(tmpDir) cmd := exec.Command(dockerBinary, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } id := strings.TrimSpace(out) buffer, err := ioutil.ReadFile(tmpCidFile) if err != nil { - t.Fatal(err) + c.Fatal(err) } cid := string(buffer) if len(cid) != 64 { - t.Fatalf("--cidfile should be a long id, not %q", id) + c.Fatalf("--cidfile should be a long id, not %q", id) } if cid != id { - t.Fatalf("cid must be equal to %s, got %s", id, cid) + c.Fatalf("cid must be equal to %s, got %s", id, cid) } - - logDone("run - cidfile contains long id") } -func TestRunNetworkNotInitializedNoneMode(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--net=none", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } id := strings.TrimSpace(out) res, err := inspectField(id, "NetworkSettings.IPAddress") if err != nil { - t.Fatal(err) + c.Fatal(err) } if res != "" { - t.Fatalf("For 'none' mode network 
must not be initialized, but container got IP: %s", res) + c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) } - - logDone("run - network must not be initialized in 'none' mode") } -func TestRunSetMacAddress(t *testing.T) { +func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { mac := "12:34:56:78:9a:bc" - defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") out, ec, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("exec failed:\nexit code=%v\noutput=%s", ec, out) + c.Fatalf("exec failed:\nexit code=%v\noutput=%s", ec, out) } actualMac := strings.TrimSpace(out) if actualMac != mac { - t.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) + c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) } - - logDone("run - setting MAC address with --mac-address") } -func TestRunInspectMacAddress(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { mac := "12:34:56:78:9a:bc" cmd := exec.Command(dockerBinary, "run", "-d", "--mac-address="+mac, "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } id := strings.TrimSpace(out) inspectedMac, err := inspectField(id, "NetworkSettings.MacAddress") if err != nil { - t.Fatal(err) + c.Fatal(err) } if inspectedMac != mac { - t.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) + c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) } - - logDone("run - inspecting MAC address") } // test docker run use a invalid mac address -func TestRunWithInvalidMacAddress(t *testing.T) { - defer deleteAllContainers() - 
+func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--mac-address", "92:d0:c6:0a:29", "busybox") out, _, err := runCommandWithOutput(runCmd) //use a invalid mac address should with a error out if err == nil || !strings.Contains(out, "is not a valid mac address") { - t.Fatalf("run with an invalid --mac-address should with error out") + c.Fatalf("run with an invalid --mac-address should with error out") } - - logDone("run - can't use an invalid mac address") } -func TestRunDeallocatePortOnMissingIptablesRule(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } id := strings.TrimSpace(out) ip, err := inspectField(id, "NetworkSettings.IPAddress") if err != nil { - t.Fatal(err) + c.Fatal(err) } iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") out, _, err = runCommandWithOutput(iptCmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if err := deleteContainer(id); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "-d", "-p", "23:23", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - - logDone("run - port should be deallocated even on iptables error") } -func TestRunPortInUse(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunPortInUse(c *check.C) { + testRequires(c, SameHostDaemon) port := "1234" l, err := net.Listen("tcp", ":"+port) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer l.Close() cmd := 
exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err == nil { - t.Fatalf("Binding on used port must fail") + c.Fatalf("Binding on used port must fail") } if !strings.Contains(out, "address already in use") { - t.Fatalf("Out must be about \"address already in use\", got %s", out) + c.Fatalf("Out must be about \"address already in use\", got %s", out) } - - logDone("run - error out if port already in use") } // https://github.com/docker/docker/issues/8428 -func TestRunPortProxy(t *testing.T) { - testRequires(t, SameHostDaemon) - - defer deleteAllContainers() +func (s *DockerSuite) TestRunPortProxy(c *check.C) { + testRequires(c, SameHostDaemon) port := "12345" cmd := exec.Command(dockerBinary, "run", "-d", "-p", port+":80", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err) + c.Fatalf("Failed to run and bind port %s, output: %s, error: %s", port, out, err) } - // connect for 10 times here. This will trigger 10 EPIPES in the child + // connect for 10 times here. 
This will trigger 10 EPIPES in the child // process and kill it when it writes to a closed stdout/stderr for i := 0; i < 10; i++ { net.Dial("tcp", fmt.Sprintf("0.0.0.0:%s", port)) } @@ -2591,343 +2233,305 @@ func TestRunPortProxy(t *testing.T) { listPs := exec.Command("sh", "-c", "ps ax | grep docker") out, _, err = runCommandWithOutput(listPs) if err != nil { - t.Errorf("list docker process failed with output %s, error %s", out, err) + c.Errorf("list docker process failed with output %s, error %s", out, err) } if strings.Contains(out, "docker ") { - t.Errorf("Unexpected defunct docker process") + c.Errorf("Unexpected defunct docker process") } if !strings.Contains(out, "docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 12345") { - t.Errorf("Failed to find docker-proxy process, got %s", out) + c.Errorf("Failed to find docker-proxy process, got %s", out) } - - logDone("run - proxy should work with unavailable port") } // Regression test for #7792 -func TestRunMountOrdering(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunMountOrdering(c *check.C) { + testRequires(c, SameHostDaemon) tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir2) - // Create a temporary tmpfs mount. + // Create a temporary tmpfs mount. 
fooDir := filepath.Join(tmpDir, "foo") if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { - t.Fatalf("failed to mkdir at %s - %s", fooDir, err) + c.Fatalf("failed to mkdir at %s - %s", fooDir, err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp", tmpDir), "-v", fmt.Sprintf("%s:/tmp/foo", fooDir), "-v", fmt.Sprintf("%s:/tmp/tmp2", tmpDir2), "-v", fmt.Sprintf("%s:/tmp/tmp2/foo", fooDir), "busybox:latest", "sh", "-c", "ls /tmp/touch-me && ls /tmp/foo/touch-me && ls /tmp/tmp2/touch-me && ls /tmp/tmp2/foo/touch-me") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - logDone("run - volumes are mounted in the correct order") } // Regression test for https://github.com/docker/docker/issues/8259 -func TestRunReuseBindVolumeThatIsSymlink(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { + testRequires(c, SameHostDaemon) tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) linkPath := os.TempDir() + "/testlink2" if err := os.Symlink(tmpDir, linkPath); err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(linkPath) // Create first container cmd := exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } // Create second container with same symlinked path // This will fail if the referenced issue 
is hit with a "Volume exists" error cmd = exec.Command(dockerBinary, "run", "-v", fmt.Sprintf("%s:/tmp/test", linkPath), "busybox", "ls", "-lh", "/tmp/test") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } - - logDone("run - can remount old bindmount volume") } //GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container -func TestRunCreateVolumeEtc(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { cmd := exec.Command(dockerBinary, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, "nameserver 127.0.0.1") { - t.Fatal("/etc volume mount hides /etc/resolv.conf") + c.Fatal("/etc volume mount hides /etc/resolv.conf") } cmd = exec.Command(dockerBinary, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } if !strings.Contains(out, "test123") { - t.Fatal("/etc volume mount hides /etc/hostname") + c.Fatal("/etc volume mount hides /etc/hostname") } cmd = exec.Command(dockerBinary, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.Replace(out, "\n", " ", -1) if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { - t.Fatal("/etc volume mount hides /etc/hosts") + c.Fatal("/etc volume mount hides /etc/hosts") } - - logDone("run - verify /etc 
volume doesn't hide special bind mounts") } -func TestVolumesNoCopyData(t *testing.T) { - defer deleteImages("dataimage") - defer deleteAllContainers() +func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { if _, err := buildImage("dataimage", `FROM busybox RUN mkdir -p /foo RUN touch /foo/bar`, true); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--name", "test", "-v", "/foo", "busybox") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { - t.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) + c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) } tmpDir := randomUnixTmpDirPath("docker_test_bind_mount_copy_data") cmd = exec.Command(dockerBinary, "run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar") if out, _, err := runCommandWithOutput(cmd); err == nil || !strings.Contains(out, "No such file or directory") { - t.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) + c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) } - - logDone("run - volumes do not copy data for volumes-from and bindmounts") } -func TestRunVolumesNotRecreatedOnStart(t *testing.T) { - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunVolumesNotRecreatedOnStart(c *check.C) { + testRequires(c, SameHostDaemon) // Clear out any remnants from other tests - deleteAllContainers() info, err := ioutil.ReadDir(volumesConfigPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } if len(info) > 0 { for _, f := range info { if err := os.RemoveAll(volumesConfigPath + "/" + f.Name()); err != nil { - t.Fatal(err) + c.Fatal(err) } } } - defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "--name", "lone_starr", 
"busybox") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "start", "lone_starr") if _, err := runCommand(cmd); err != nil { - t.Fatal(err) + c.Fatal(err) } info, err = ioutil.ReadDir(volumesConfigPath) if err != nil { - t.Fatal(err) + c.Fatal(err) } if len(info) != 1 { - t.Fatalf("Expected only 1 volume have %v", len(info)) + c.Fatalf("Expected only 1 volume have %v", len(info)) } - - logDone("run - volumes not recreated on start") } -func TestRunNoOutputFromPullInStdout(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { // just run with unknown image cmd := exec.Command(dockerBinary, "run", "asdfsg") stdout := bytes.NewBuffer(nil) cmd.Stdout = stdout if err := cmd.Run(); err == nil { - t.Fatal("Run with unknown image should fail") + c.Fatal("Run with unknown image should fail") } if stdout.Len() != 0 { - t.Fatalf("Stdout contains output from pull: %s", stdout) + c.Fatalf("Stdout contains output from pull: %s", stdout) } - logDone("run - no output from pull in stdout") } -func TestRunVolumesCleanPaths(t *testing.T) { +func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { if _, err := buildImage("run_volumes_clean_paths", `FROM busybox VOLUME /foo/`, true); err != nil { - t.Fatal(err) + c.Fatal(err) } - defer deleteImages("run_volumes_clean_paths") - defer deleteAllContainers() cmd := exec.Command(dockerBinary, "run", "-v", "/foo", "-v", "/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out, err := inspectFieldMap("dark_helmet", "Volumes", "/foo/") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if out != "" { - t.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) + if out != "" { + c.Fatalf("Found unexpected volume entry for '/foo/' in volumes\n%q", out) } out, err = inspectFieldMap("dark_helmet", 
"Volumes", "/foo") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, volumesStoragePath) { - t.Fatalf("Volume was not defined for /foo\n%q", out) + c.Fatalf("Volume was not defined for /foo\n%q", out) } out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar/") if err != nil { - t.Fatal(err) + c.Fatal(err) } - if out != "" { - t.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) + if out != "" { + c.Fatalf("Found unexpected volume entry for '/bar/' in volumes\n%q", out) } out, err = inspectFieldMap("dark_helmet", "Volumes", "/bar") if err != nil { - t.Fatal(err) + c.Fatal(err) } if !strings.Contains(out, volumesStoragePath) { - t.Fatalf("Volume was not defined for /bar\n%q", out) + c.Fatalf("Volume was not defined for /bar\n%q", out) } - - logDone("run - volume paths are cleaned") } // Regression test for #3631 -func TestRunSlowStdoutConsumer(t *testing.T) { - defer deleteAllContainers() - - c := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") +func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { + cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") - stdout, err := c.StdoutPipe() + stdout, err := cont.StdoutPipe() if err != nil { - t.Fatal(err) + c.Fatal(err) } - if err := c.Start(); err != nil { - t.Fatal(err) + if err := cont.Start(); err != nil { + c.Fatal(err) } n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) if err != nil { - t.Fatal(err) + c.Fatal(err) } expected := 2 * 1024 * 2000 if n != expected { - t.Fatalf("Expected %d, got %d", expected, n) + c.Fatalf("Expected %d, got %d", expected, n) } - - logDone("run - slow consumer") } -func TestRunAllowPortRangeThroughExpose(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { cmd := 
exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err) + c.Fatal(err) } id := strings.TrimSpace(out) portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") if err != nil { - t.Fatal(err) + c.Fatal(err) } var ports nat.PortMap if err = unmarshalJSON([]byte(portstr), &ports); err != nil { - t.Fatal(err) + c.Fatal(err) } for port, binding := range ports { portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) if portnum < 3000 || portnum > 3003 { - t.Fatalf("Port %d is out of range ", portnum) + c.Fatalf("Port %d is out of range ", portnum) } if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { - t.Fatalf("Port is not mapped for the port %d", port) + c.Fatalf("Port is not mapped for the port %d", port) } } if err := deleteContainer(id); err != nil { - t.Fatal(err) + c.Fatal(err) } - logDone("run - allow port range through --expose flag") } // test docker run expose a invalid port -func TestRunExposePort(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunExposePort(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--expose", "80000", "busybox") out, _, err := runCommandWithOutput(runCmd) //expose a invalid port should with a error out if err == nil || !strings.Contains(out, "Invalid range format for --expose") { - t.Fatalf("run --expose a invalid port should with error out") + c.Fatalf("run --expose a invalid port should with error out") } - - logDone("run - can't expose a invalid port") } -func TestRunUnknownCommand(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunUnknownCommand(c *check.C) { + testRequires(c, NativeExecDriver) runCmd := exec.Command(dockerBinary, "create", "busybox", "/bin/nada") cID, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("Failed to create container: %v, output: %q", err, cID) 
+ c.Fatalf("Failed to create container: %v, output: %q", err, cID) } cID = strings.TrimSpace(cID) @@ -2939,176 +2543,167 @@ func TestRunUnknownCommand(t *testing.T) { rc = strings.TrimSpace(rc) if err2 != nil { - t.Fatalf("Error getting status of container: %v", err2) + c.Fatalf("Error getting status of container: %v", err2) } if rc == "0" { - t.Fatalf("ExitCode(%v) cannot be 0", rc) + c.Fatalf("ExitCode(%v) cannot be 0", rc) } - - logDone("run - Unknown Command") } -func TestRunModeIpcHost(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { + testRequires(c, SameHostDaemon) hostIpc, err := os.Readlink("/proc/1/ns/ipc") if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostIpc != out2 { - t.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2) + c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostIpc == out2 { - t.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out2) + c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out2) } - - logDone("run - ipc host mode") } -func TestRunModeIpcContainer(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(out) 
state, err := inspectField(id, "State.Running") if err != nil { - t.Fatal(err) + c.Fatal(err) } if state != "true" { - t.Fatal("Container state is 'not running'") + c.Fatal("Container state is 'not running'") } pid1, err := inspectField(id, "State.Pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") out2, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if parentContainerIpc != out2 { - t.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2) + c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out2) } - - logDone("run - ipc container mode") } -func TestRunModeIpcContainerNotExists(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--ipc", "container:abcd1234", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if !strings.Contains(out, "abcd1234") || err == nil { - t.Fatalf("run IPC from a non exists container should with correct error out") + c.Fatalf("run IPC from a non exists container should with correct error out") } - - logDone("run - ipc from a non exists container failed with correct error out") } -func TestContainerNetworkMode(t *testing.T) { - defer deleteAllContainers() - testRequires(t, SameHostDaemon) +func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { + testRequires(c, SameHostDaemon) cmd := exec.Command(dockerBinary, "run", "-d", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } id := strings.TrimSpace(out) if err := waitRun(id); err != nil { - t.Fatal(err) + 
c.Fatal(err) } pid1, err := inspectField(id, "State.Pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd = exec.Command(dockerBinary, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") out2, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if parentContainerNet != out2 { - t.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2) + c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out2) } +} - logDone("run - container shared network namespace") +func (s *DockerSuite) TestContainerNetworkModeToSelf(c *check.C) { + cmd := exec.Command(dockerBinary, "run", "--name=me", "--net=container:me", "busybox", "true") + out, _, err := runCommandWithOutput(cmd) + if err == nil || !strings.Contains(out, "cannot join own network") { + c.Fatalf("using container net mode to self should result in an error") + } } -func TestRunModePidHost(t *testing.T) { - testRequires(t, NativeExecDriver, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunModePidHost(c *check.C) { + testRequires(c, NativeExecDriver, SameHostDaemon) hostPid, err := os.Readlink("/proc/1/ns/pid") if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") out2, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostPid != out2 { - t.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out2) + c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/pid") out2, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, 
out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostPid == out2 { - t.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out2) + c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out2) } - - logDone("run - pid host mode") } -func TestRunTLSverify(t *testing.T) { +func (s *DockerSuite) TestRunTLSverify(c *check.C) { cmd := exec.Command(dockerBinary, "ps") out, ec, err := runCommandWithOutput(cmd) if err != nil || ec != 0 { - t.Fatalf("Should have worked: %v:\n%v", err, out) + c.Fatalf("Should have worked: %v:\n%v", err, out) } // Regardless of whether we specify true or false we need to @@ -3117,354 +2712,333 @@ func TestRunTLSverify(t *testing.T) { cmd = exec.Command(dockerBinary, "--tlsverify=false", "ps") out, ec, err = runCommandWithOutput(cmd) if err == nil || ec == 0 || !strings.Contains(out, "trying to connect") { - t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + c.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) } cmd = exec.Command(dockerBinary, "--tlsverify=true", "ps") out, ec, err = runCommandWithOutput(cmd) if err == nil || ec == 0 || !strings.Contains(out, "cert") { - t.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) + c.Fatalf("Should have failed: \nec:%v\nout:%v\nerr:%v", ec, out, err) } - - logDone("run - verify tls is set for --tlsverify") } -func TestRunPortFromDockerRangeInUse(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { // first find allocator current position cmd := exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } id := strings.TrimSpace(out) cmd = exec.Command(dockerBinary, "port", id) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.TrimSpace(out) if out == "" { - 
t.Fatal("docker port command output is empty") + c.Fatal("docker port command output is empty") } out = strings.Split(out, ":")[1] lastPort, err := strconv.Atoi(out) if err != nil { - t.Fatal(err) + c.Fatal(err) } port := lastPort + 1 l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) if err != nil { - t.Fatal(err) + c.Fatal(err) } defer l.Close() cmd = exec.Command(dockerBinary, "run", "-d", "-p", ":80", "busybox", "top") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatalf(out, err) + c.Fatalf(out, err) } id = strings.TrimSpace(out) cmd = exec.Command(dockerBinary, "port", id) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - logDone("run - find another port if port from autorange already bound") } -func TestRunTtyWithPipe(t *testing.T) { - defer deleteAllContainers() - - done := make(chan struct{}) +func (s *DockerSuite) TestRunTtyWithPipe(c *check.C) { + errChan := make(chan error) go func() { - defer close(done) + defer close(errChan) cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") if _, err := cmd.StdinPipe(); err != nil { - t.Fatal(err) + errChan <- err + return } expected := "cannot enable tty mode" if out, _, err := runCommandWithOutput(cmd); err == nil { - t.Fatal("run should have failed") + errChan <- fmt.Errorf("run should have failed") + return } else if !strings.Contains(out, expected) { - t.Fatalf("run failed with error %q: expected %q", out, expected) + errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) + return } }() select { - case <-done: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(3 * time.Second): - t.Fatal("container is running but should have failed") + c.Fatal("container is running but should have failed") } - - logDone("run - forbid piped stdin with tty") } -func TestRunNonLocalMacAddress(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { addr 
:= "00:16:3E:08:00:50" cmd := exec.Command(dockerBinary, "run", "--mac-address", addr, "busybox", "ifconfig") if out, _, err := runCommandWithOutput(cmd); err != nil || !strings.Contains(out, addr) { - t.Fatalf("Output should have contained %q: %s, %v", addr, out, err) + c.Fatalf("Output should have contained %q: %s, %v", addr, out, err) } - - logDone("run - use non-local mac-address") } -func TestRunNetHost(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunNetHost(c *check.C) { + testRequires(c, SameHostDaemon) hostNet, err := os.Readlink("/proc/1/ns/net") if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") out2, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostNet != out2 { - t.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out2) + c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out2) } cmd = exec.Command(dockerBinary, "run", "busybox", "readlink", "/proc/self/ns/net") out2, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out2) + c.Fatal(err, out2) } out2 = strings.Trim(out2, "\n") if hostNet == out2 { - t.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out2) + c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out2) } - - logDone("run - net host mode") } -func TestRunNetContainerWhichHost(t *testing.T) { - testRequires(t, SameHostDaemon) - defer deleteAllContainers() +func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { + testRequires(c, SameHostDaemon) hostNet, err := os.Readlink("/proc/1/ns/net") if err != nil { - t.Fatal(err) + c.Fatal(err) } cmd := exec.Command(dockerBinary, "run", "-d", "--net=host", "--name=test", "busybox", "top") out, _, err := runCommandWithOutput(cmd) if 
err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } cmd = exec.Command(dockerBinary, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } out = strings.Trim(out, "\n") if hostNet != out { - t.Fatalf("Container should have host network namespace") + c.Fatalf("Container should have host network namespace") } - - logDone("run - net container mode, where container in host mode") } -func TestRunAllowPortRangeThroughPublish(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { cmd := exec.Command(dockerBinary, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") out, _, err := runCommandWithOutput(cmd) id := strings.TrimSpace(out) portstr, err := inspectFieldJSON(id, "NetworkSettings.Ports") if err != nil { - t.Fatal(err) + c.Fatal(err) } var ports nat.PortMap err = unmarshalJSON([]byte(portstr), &ports) for port, binding := range ports { portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) if portnum < 3000 || portnum > 3003 { - t.Fatalf("Port %d is out of range ", portnum) + c.Fatalf("Port %d is out of range ", portnum) } if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { - t.Fatal("Port is not mapped for the port "+port, out) + c.Fatal("Port is not mapped for the port "+port, out) } } - logDone("run - allow port range through --expose flag") } -func TestRunOOMExitCode(t *testing.T) { - defer deleteAllContainers() - - done := make(chan struct{}) +func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { + errChan := make(chan error) go func() { - defer close(done) - + defer close(errChan) runCmd := exec.Command(dockerBinary, "run", "-m", "4MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") out, exitCode, _ := runCommandWithOutput(runCmd) if expected := 137; exitCode != expected { - t.Fatalf("wrong exit code for 
OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) + errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) } }() select { - case <-done: + case err := <-errChan: + c.Assert(err, check.IsNil) case <-time.After(30 * time.Second): - t.Fatal("Timeout waiting for container to die on OOM") + c.Fatal("Timeout waiting for container to die on OOM") } - - logDone("run - exit code on oom") } -func TestRunSetDefaultRestartPolicy(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "test", "busybox", "top") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } cmd := exec.Command(dockerBinary, "inspect", "-f", "{{.HostConfig.RestartPolicy.Name}}", "test") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatalf("failed to inspect container: %v, output: %q", err, out) + c.Fatalf("failed to inspect container: %v, output: %q", err, out) } out = strings.Trim(out, "\r\n") if out != "no" { - t.Fatalf("Set default restart policy failed") + c.Fatalf("Set default restart policy failed") } - - logDone("run - set default restart policy success") } -func TestRunRestartMaxRetries(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { out, err := exec.Command(dockerBinary, "run", "-d", "--restart=on-failure:3", "busybox", "false").CombinedOutput() if err != nil { - t.Fatal(string(out), err) + c.Fatal(string(out), err) } id := strings.TrimSpace(string(out)) - if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 5); err != nil { - t.Fatal(err) + if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 10); err != nil { + 
c.Fatal(err) } count, err := inspectField(id, "RestartCount") if err != nil { - t.Fatal(err) + c.Fatal(err) } if count != "3" { - t.Fatalf("Container was restarted %s times, expected %d", count, 3) + c.Fatalf("Container was restarted %s times, expected %d", count, 3) } MaximumRetryCount, err := inspectField(id, "HostConfig.RestartPolicy.MaximumRetryCount") if err != nil { - t.Fatal(err) + c.Fatal(err) } if MaximumRetryCount != "3" { - t.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") + c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") } - logDone("run - test max-retries for --restart") } -func TestRunContainerWithWritableRootfs(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { out, err := exec.Command(dockerBinary, "run", "--rm", "busybox", "touch", "/file").CombinedOutput() if err != nil { - t.Fatal(string(out), err) + c.Fatal(string(out), err) } - logDone("run - writable rootfs") } -func TestRunContainerWithReadonlyRootfs(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { + testRequires(c, NativeExecDriver) out, err := exec.Command(dockerBinary, "run", "--read-only", "--rm", "busybox", "touch", "/file").CombinedOutput() if err == nil { - t.Fatal("expected container to error on run with read only error") + c.Fatal("expected container to error on run with read only error") } expected := "Read-only file system" if !strings.Contains(string(out), expected) { - t.Fatalf("expected output from failure to contain %s but contains %s", expected, out) + c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) } - logDone("run - read only rootfs") } -func TestRunVolumesFromRestartAfterRemoved(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) 
{ out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "voltest", "-v", "/foo", "busybox")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "restarter", "--volumes-from", "voltest", "busybox", "top")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // Remove the main volume container and restart the consuming container out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "rm", "-f", "voltest")) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } // This should not fail since the volumes-from were already applied out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "restart", "restarter")) if err != nil { - t.Fatalf("expected container to restart successfully: %v\n%s", err, out) + c.Fatalf("expected container to restart successfully: %v\n%s", err, out) } - - logDone("run - can restart a volumes-from container after producer is removed") } // run container with --rm should remove container if exit code != 0 -func TestRunContainerWithRmFlagExitCodeNotEqualToZero(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { name := "flowers" runCmd := exec.Command(dockerBinary, "run", "--name", name, "--rm", "busybox", "ls", "/notexists") out, _, err := runCommandWithOutput(runCmd) if err == nil { - t.Fatal("Expected docker run to fail", out, err) + c.Fatal("Expected docker run to fail", out, err) } out, err = getAllContainers() if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "" { - t.Fatal("Expected not to have containers", out) + c.Fatal("Expected not to have containers", out) } - - logDone("run - container is removed if run with --rm and exit code != 0") } -func TestRunContainerWithRmFlagCannotStartContainer(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) 
TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { name := "sparkles" runCmd := exec.Command(dockerBinary, "run", "--name", name, "--rm", "busybox", "commandNotFound") out, _, err := runCommandWithOutput(runCmd) if err == nil { - t.Fatal("Expected docker run to fail", out, err) + c.Fatal("Expected docker run to fail", out, err) } out, err = getAllContainers() if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } if out != "" { - t.Fatal("Expected not to have containers", out) + c.Fatal("Expected not to have containers", out) } +} - logDone("run - container is removed if run with --rm and cannot start") +func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) { + name := "ibuildthecloud" + if out, err := exec.Command(dockerBinary, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi").CombinedOutput(); err != nil { + c.Fatal(err, out) + } + time.Sleep(1 * time.Second) + errchan := make(chan error) + go func() { + if out, err := exec.Command(dockerBinary, "kill", name).CombinedOutput(); err != nil { + errchan <- fmt.Errorf("%v:\n%s", err, out) + } + close(errchan) + }() + select { + case err := <-errchan: + c.Assert(err, check.IsNil) + case <-time.After(5 * time.Second): + c.Fatal("Kill container timed out") + } } diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go index 026f8279efba8..59b623162de44 100644 --- a/integration-cli/docker_cli_run_unix_test.go +++ b/integration-cli/docker_cli_run_unix_test.go @@ -3,6 +3,7 @@ package main import ( + "bufio" "fmt" "io/ioutil" "os" @@ -10,58 +11,52 @@ import ( "path" "path/filepath" "strings" - "testing" "time" "github.com/docker/docker/pkg/mount" + "github.com/go-check/check" "github.com/kr/pty" ) // #6509 -func TestRunRedirectStdout(t *testing.T) { - - defer deleteAllContainers() - +func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { checkRedirect := func(command string) { _, tty, err := pty.Open() if err != 
nil { - t.Fatalf("Could not open pty: %v", err) + c.Fatalf("Could not open pty: %v", err) } cmd := exec.Command("sh", "-c", command) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty - ch := make(chan struct{}) if err := cmd.Start(); err != nil { - t.Fatalf("start err: %v", err) + c.Fatalf("start err: %v", err) } + ch := make(chan error) go func() { - if err := cmd.Wait(); err != nil { - t.Fatalf("wait err=%v", err) - } + ch <- cmd.Wait() close(ch) }() select { case <-time.After(10 * time.Second): - t.Fatal("command timeout") - case <-ch: + c.Fatal("command timeout") + case err := <-ch: + if err != nil { + c.Fatalf("wait err=%v", err) + } } } checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") - - logDone("run - redirect stdout") } // Test recursive bind mount works by default -func TestRunWithVolumesIsRecursive(t *testing.T) { - defer deleteAllContainers() - +func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer os.RemoveAll(tmpDir) @@ -69,68 +64,62 @@ func TestRunWithVolumesIsRecursive(t *testing.T) { // Create a temporary tmpfs mount. 
tmpfsDir := filepath.Join(tmpDir, "tmpfs") if err := os.MkdirAll(tmpfsDir, 0777); err != nil { - t.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err) + c.Fatalf("failed to mkdir at %s - %s", tmpfsDir, err) } if err := mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""); err != nil { - t.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) + c.Fatalf("failed to create a tmpfs mount at %s - %s", tmpfsDir, err) } f, err := ioutil.TempFile(tmpfsDir, "touch-me") if err != nil { - t.Fatal(err) + c.Fatal(err) } defer f.Close() runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") out, stderr, exitCode, err := runCommandWithStdoutStderr(runCmd) if err != nil && exitCode != 0 { - t.Fatal(out, stderr, err) + c.Fatal(out, stderr, err) } if !strings.Contains(out, filepath.Base(f.Name())) { - t.Fatal("Recursive bind mount test failed. Expected file not found") + c.Fatal("Recursive bind mount test failed. 
Expected file not found") } - - logDone("run - volumes are bind mounted recursively") } -func TestRunWithUlimits(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunWithUlimits(c *check.C) { + testRequires(c, NativeExecDriver) out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n")) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } ul := strings.TrimSpace(out) if ul != "42" { - t.Fatalf("expected `ulimit -n` to be 42, got %s", ul) + c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) } - - logDone("run - ulimits are set") } -func TestRunContainerWithCgroupParent(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { + testRequires(c, NativeExecDriver) cgroupParent := "test" data, err := ioutil.ReadFile("/proc/self/cgroup") if err != nil { - t.Fatalf("failed to read '/proc/self/cgroup - %v", err) + c.Fatalf("failed to read '/proc/self/cgroup - %v", err) } selfCgroupPaths := parseCgroupPaths(string(data)) selfCpuCgroup, found := selfCgroupPaths["memory"] if !found { - t.Fatalf("unable to find self cpu cgroup path. CgroupsPath: %v", selfCgroupPaths) + c.Fatalf("unable to find self cpu cgroup path. 
CgroupsPath: %v", selfCgroupPaths) } out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup")) if err != nil { - t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { - t.Fatalf("unexpected output - %q", string(out)) + c.Fatalf("unexpected output - %q", string(out)) } found = false expectedCgroupPrefix := path.Join(selfCpuCgroup, cgroupParent) @@ -141,24 +130,22 @@ func TestRunContainerWithCgroupParent(t *testing.T) { } } if !found { - t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths) + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. 
Cgroup Paths: %v", expectedCgroupPrefix, cgroupPaths) } - logDone("run - cgroup parent") } -func TestRunContainerWithCgroupParentAbsPath(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { + testRequires(c, NativeExecDriver) cgroupParent := "/cgroup-parent/test" out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "--cgroup-parent", cgroupParent, "--rm", "busybox", "cat", "/proc/self/cgroup")) if err != nil { - t.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) + c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } cgroupPaths := parseCgroupPaths(string(out)) if len(cgroupPaths) == 0 { - t.Fatalf("unexpected output - %q", string(out)) + c.Fatalf("unexpected output - %q", string(out)) } found := false for _, path := range cgroupPaths { @@ -168,36 +155,98 @@ func TestRunContainerWithCgroupParentAbsPath(t *testing.T) { } } if !found { - t.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. Cgroup Paths: %v", cgroupParent, cgroupPaths) + c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have prefix %q. 
Cgroup Paths: %v", cgroupParent, cgroupPaths) } - - logDone("run - cgroup parent with absolute cgroup path") } -func TestRunDeviceDirectory(t *testing.T) { - testRequires(t, NativeExecDriver) - defer deleteAllContainers() +func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { + testRequires(c, NativeExecDriver) cmd := exec.Command(dockerBinary, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "timer") { - t.Fatalf("expected output /dev/snd/timer, received %s", actual) + c.Fatalf("expected output /dev/snd/timer, received %s", actual) } cmd = exec.Command(dockerBinary, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } if actual := strings.Trim(out, "\r\n"); !strings.Contains(out, "seq") { - t.Fatalf("expected output /dev/othersnd/seq, received %s", actual) + c.Fatalf("expected output /dev/othersnd/seq, received %s", actual) + } +} + +// TestRunAttachDetach checks attaching and detaching with the escape sequence. 
+func (s *DockerSuite) TestRunAttachDetach(c *check.C) { + name := "attach-detach" + cmd := exec.Command(dockerBinary, "run", "--name", name, "-it", "busybox", "cat") + stdout, err := cmd.StdoutPipe() + if err != nil { + c.Fatal(err) + } + cpty, tty, err := pty.Open() + if err != nil { + c.Fatal(err) + } + defer cpty.Close() + cmd.Stdin = tty + if err := cmd.Start(); err != nil { + c.Fatal(err) + } + if err := waitRun(name); err != nil { + c.Fatal(err) } - logDone("run - test --device directory mounts all internal devices") + if _, err := cpty.Write([]byte("hello\n")); err != nil { + c.Fatal(err) + } + + out, err := bufio.NewReader(stdout).ReadString('\n') + if err != nil { + c.Fatal(err) + } + if strings.TrimSpace(out) != "hello" { + c.Fatalf("expected 'hello', got %q", out) + } + + // escape sequence + if _, err := cpty.Write([]byte{16}); err != nil { + c.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + if _, err := cpty.Write([]byte{17}); err != nil { + c.Fatal(err) + } + + ch := make(chan struct{}) + go func() { + cmd.Wait() + ch <- struct{}{} + }() + + running, err := inspectField(name, "State.Running") + if err != nil { + c.Fatal(err) + } + if running != "true" { + c.Fatal("expected container to still be running") + } + + go func() { + exec.Command(dockerBinary, "kill", name).Run() + }() + + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + c.Fatal("timed out waiting for container to exit") + } } diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go index c7bfb945d38ea..f83f6645ac616 100644 --- a/integration-cli/docker_cli_save_load_test.go +++ b/integration-cli/docker_cli_save_load_test.go @@ -9,15 +9,16 @@ import ( "reflect" "sort" "strings" - "testing" + + "github.com/go-check/check" ) // save a repo using gz compression and try to load it using stdout -func TestSaveXzAndLoadRepoStdout(t *testing.T) { +func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) { runCmd := 
exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %v %v", out, err) + c.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -27,19 +28,19 @@ func TestSaveXzAndLoadRepoStdout(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) + c.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) out, _, err = runCommandWithOutput(commitCmd) if err != nil { - t.Fatalf("failed to commit container: %v %v", out, err) + c.Fatalf("failed to commit container: %v %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist before saving it: %v %v", before, err) + c.Fatalf("the repo should exist before saving it: %v %v", before, err) } repoTarball, _, err := runCommandPipelineWithOutput( @@ -47,7 +48,7 @@ func TestSaveXzAndLoadRepoStdout(t *testing.T) { exec.Command("xz", "-c"), exec.Command("gzip", "-c")) if err != nil { - t.Fatalf("failed to save repo: %v %v", out, err) + c.Fatalf("failed to save repo: %v %v", out, err) } deleteImages(repoName) @@ -55,26 +56,25 @@ func TestSaveXzAndLoadRepoStdout(t *testing.T) { loadCmd.Stdin = strings.NewReader(repoTarball) out, _, err = runCommandWithOutput(loadCmd) if err == nil { - t.Fatalf("expected error, but succeeded with no error and output: %v", out) + c.Fatalf("expected error, but succeeded with no error and output: %v", out) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err == nil { - t.Fatalf("the repo should 
not exist: %v", after) + c.Fatalf("the repo should not exist: %v", after) } deleteImages(repoName) - logDone("load - save a repo with xz compression & load it using stdout") } // save a repo using xz+gz compression and try to load it using stdout -func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { +func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %v %v", out, err) + c.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -84,19 +84,19 @@ func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) out, _, err = runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) + c.Fatalf("output should've been a container id: %v %v", cleanedContainerID, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) out, _, err = runCommandWithOutput(commitCmd) if err != nil { - t.Fatalf("failed to commit container: %v %v", out, err) + c.Fatalf("failed to commit container: %v %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist before saving it: %v %v", before, err) + c.Fatalf("the repo should exist before saving it: %v %v", before, err) } out, _, err = runCommandPipelineWithOutput( @@ -104,7 +104,7 @@ func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { exec.Command("xz", "-c"), exec.Command("gzip", "-c")) if err != nil { - t.Fatalf("failed to save repo: %v %v", out, err) + c.Fatalf("failed to save repo: %v %v", out, err) } deleteImages(repoName) @@ -113,34 +113,32 @@ func TestSaveXzGzAndLoadRepoStdout(t *testing.T) { loadCmd.Stdin = strings.NewReader(out) 
out, _, err = runCommandWithOutput(loadCmd) if err == nil { - t.Fatalf("expected error, but succeeded with no error and output: %v", out) + c.Fatalf("expected error, but succeeded with no error and output: %v", out) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err == nil { - t.Fatalf("the repo should not exist: %v", after) + c.Fatalf("the repo should not exist: %v", after) } deleteContainer(cleanedContainerID) deleteImages(repoName) - logDone("load - save a repo with xz+gz compression & load it using stdout") } -func TestSaveSingleTag(t *testing.T) { +func (s *DockerSuite) TestSaveSingleTag(c *check.C) { repoName := "foobar-save-single-tag-test" tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName)) - defer deleteImages(repoName) if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatalf("failed to tag repo: %s, %v", out, err) + c.Fatalf("failed to tag repo: %s, %v", out, err) } idCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName) out, _, err := runCommandWithOutput(idCmd) if err != nil { - t.Fatalf("failed to get repo ID: %s, %v", out, err) + c.Fatalf("failed to get repo ID: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) @@ -149,25 +147,23 @@ func TestSaveSingleTag(t *testing.T) { exec.Command("tar", "t"), exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID))) if err != nil { - t.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) + c.Fatalf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err) } - logDone("save - save a specific image:tag") } -func TestSaveImageId(t *testing.T) { +func (s *DockerSuite) TestSaveImageId(c *check.C) { repoName := "foobar-save-image-id-test" tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName)) - defer deleteImages(repoName) if out, _, err := 
runCommandWithOutput(tagCmd); err != nil { - t.Fatalf("failed to tag repo: %s, %v", out, err) + c.Fatalf("failed to tag repo: %s, %v", out, err) } idLongCmd := exec.Command(dockerBinary, "images", "-q", "--no-trunc", repoName) out, _, err := runCommandWithOutput(idLongCmd) if err != nil { - t.Fatalf("failed to get repo ID: %s, %v", out, err) + c.Fatalf("failed to get repo ID: %s, %v", out, err) } cleanedLongImageID := strings.TrimSpace(out) @@ -175,7 +171,7 @@ func TestSaveImageId(t *testing.T) { idShortCmd := exec.Command(dockerBinary, "images", "-q", repoName) out, _, err = runCommandWithOutput(idShortCmd) if err != nil { - t.Fatalf("failed to get repo short ID: %s, %v", out, err) + c.Fatalf("failed to get repo short ID: %s, %v", out, err) } cleanedShortImageID := strings.TrimSpace(out) @@ -184,19 +180,19 @@ func TestSaveImageId(t *testing.T) { tarCmd := exec.Command("tar", "t") tarCmd.Stdin, err = saveCmd.StdoutPipe() if err != nil { - t.Fatalf("cannot set stdout pipe for tar: %v", err) + c.Fatalf("cannot set stdout pipe for tar: %v", err) } grepCmd := exec.Command("grep", cleanedLongImageID) grepCmd.Stdin, err = tarCmd.StdoutPipe() if err != nil { - t.Fatalf("cannot set stdout pipe for grep: %v", err) + c.Fatalf("cannot set stdout pipe for grep: %v", err) } if err = tarCmd.Start(); err != nil { - t.Fatalf("tar failed with error: %v", err) + c.Fatalf("tar failed with error: %v", err) } if err = saveCmd.Start(); err != nil { - t.Fatalf("docker save failed with error: %v", err) + c.Fatalf("docker save failed with error: %v", err) } defer saveCmd.Wait() defer tarCmd.Wait() @@ -204,40 +200,38 @@ func TestSaveImageId(t *testing.T) { out, _, err = runCommandWithOutput(grepCmd) if err != nil { - t.Fatalf("failed to save repo with image ID: %s, %v", out, err) + c.Fatalf("failed to save repo with image ID: %s, %v", out, err) } - logDone("save - save a image by ID") } // save a repo and try to load it using flags -func TestSaveAndLoadRepoFlags(t *testing.T) { +func (s 
*DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %s, %v", out, err) + c.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) repoName := "foobar-save-load-test" inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("output should've been a container id: %s, %v", out, err) + c.Fatalf("output should've been a container id: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) deleteImages(repoName) if out, _, err = runCommandWithOutput(commitCmd); err != nil { - t.Fatalf("failed to commit container: %s, %v", out, err) + c.Fatalf("failed to commit container: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + c.Fatalf("the repo should exist before saving it: %s, %v", before, err) } @@ -245,39 +239,36 @@ func TestSaveAndLoadRepoFlags(t *testing.T) { exec.Command(dockerBinary, "save", repoName), exec.Command(dockerBinary, "load")) if err != nil { - t.Fatalf("failed to save and load repo: %s, %v", out, err) + c.Fatalf("failed to save and load repo: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist after loading it: %s, %v", after, err) + c.Fatalf("the repo should exist after loading it: %s, %v", after, err) } if before != after { - t.Fatalf("inspect is not the same after a save / load") + c.Fatalf("inspect is not the same after a save / load") } - logDone("save - save a 
repo using -o && load a repo using -i") } -func TestSaveMultipleNames(t *testing.T) { +func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { repoName := "foobar-save-multi-name-test" // Make one image tagCmd := exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatalf("failed to tag repo: %s, %v", out, err) + c.Fatalf("failed to tag repo: %s, %v", out, err) } - defer deleteImages(repoName + "-one") // Make two images tagCmd = exec.Command(dockerBinary, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) out, _, err := runCommandWithOutput(tagCmd) if err != nil { - t.Fatalf("failed to tag repo: %s, %v", out, err) + c.Fatalf("failed to tag repo: %s, %v", out, err) } - defer deleteImages(repoName + "-two") out, _, err = runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), @@ -285,13 +276,12 @@ func TestSaveMultipleNames(t *testing.T) { exec.Command("grep", "-q", "-E", "(-one|-two)"), ) if err != nil { - t.Fatalf("failed to save multiple repos: %s, %v", out, err) + c.Fatalf("failed to save multiple repos: %s, %v", out, err) } - logDone("save - save by multiple names") } -func TestSaveRepoWithMultipleImages(t *testing.T) { +func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { makeImage := func(from string, tag string) string { runCmd := exec.Command(dockerBinary, "run", "-d", from, "true") @@ -300,14 +290,13 @@ func TestSaveRepoWithMultipleImages(t *testing.T) { err error ) if out, _, err = runCommandWithOutput(runCmd); err != nil { - t.Fatalf("failed to create a container: %v %v", out, err) + c.Fatalf("failed to create a container: %v %v", out, err) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, tag) if out, _, err = 
runCommandWithOutput(commitCmd); err != nil { - t.Fatalf("failed to commit container: %v %v", out, err) + c.Fatalf("failed to commit container: %v %v", out, err) } imageID := strings.TrimSpace(out) return imageID @@ -318,9 +307,7 @@ func TestSaveRepoWithMultipleImages(t *testing.T) { tagBar := repoName + ":bar" idFoo := makeImage("busybox:latest", tagFoo) - defer deleteImages(idFoo) idBar := makeImage("busybox:latest", tagBar) - defer deleteImages(idBar) deleteImages(repoName) @@ -331,14 +318,14 @@ func TestSaveRepoWithMultipleImages(t *testing.T) { exec.Command("grep", "VERSION"), exec.Command("cut", "-d", "/", "-f1")) if err != nil { - t.Fatalf("failed to save multiple images: %s, %v", out, err) + c.Fatalf("failed to save multiple images: %s, %v", out, err) } actual := strings.Split(strings.TrimSpace(out), "\n") // make the list of expected layers out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "history", "-q", "--no-trunc", "busybox:latest")) if err != nil { - t.Fatalf("failed to get history: %s, %v", out, err) + c.Fatalf("failed to get history: %s, %v", out, err) } expected := append(strings.Split(strings.TrimSpace(out), "\n"), idFoo, idBar) @@ -346,46 +333,44 @@ func TestSaveRepoWithMultipleImages(t *testing.T) { sort.Strings(actual) sort.Strings(expected) if !reflect.DeepEqual(expected, actual) { - t.Fatalf("achive does not contains the right layers: got %v, expected %v", actual, expected) + c.Fatalf("archive does not contains the right layers: got %v, expected %v", actual, expected) } - logDone("save - save repository with multiple images") } // Issue #6722 #5892 ensure directories are included in changes -func TestSaveDirectoryPermissions(t *testing.T) { +func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} name := 
"save-directory-permissions" tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") if err != nil { - t.Errorf("failed to create temporary directory: %s", err) + c.Errorf("failed to create temporary directory: %s", err) } extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") os.Mkdir(extractionDirectory, 0777) defer os.RemoveAll(tmpDir) - defer deleteImages(name) _, err = buildImage(name, `FROM busybox RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, true) if err != nil { - t.Fatal(err) + c.Fatal(err) } if out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", name), exec.Command("tar", "-xf", "-", "-C", extractionDirectory), ); err != nil { - t.Errorf("failed to save and extract image: %s", out) + c.Errorf("failed to save and extract image: %s", out) } dirs, err := ioutil.ReadDir(extractionDirectory) if err != nil { - t.Errorf("failed to get a listing of the layer directories: %s", err) + c.Errorf("failed to get a listing of the layer directories: %s", err) } found := false @@ -396,7 +381,7 @@ func TestSaveDirectoryPermissions(t *testing.T) { f, err := os.Open(layerPath) if err != nil { - t.Fatalf("failed to open %s: %s", layerPath, err) + c.Fatalf("failed to open %s: %s", layerPath, err) } entries, err := ListTar(f) @@ -406,7 +391,7 @@ func TestSaveDirectoryPermissions(t *testing.T) { } } if err != nil { - t.Fatalf("encountered error while listing tar entries: %s", err) + c.Fatalf("encountered error while listing tar entries: %s", err) } if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { @@ -417,8 +402,7 @@ func TestSaveDirectoryPermissions(t *testing.T) { } if !found { - t.Fatalf("failed to find the layer with the right content listing") + c.Fatalf("failed to find the layer with the right content listing") } - logDone("save - ensure directories exist in exported layers") } diff --git 
a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go index 7eb948d7aec33..658666d6b806e 100644 --- a/integration-cli/docker_cli_save_load_unix_test.go +++ b/integration-cli/docker_cli_save_load_unix_test.go @@ -8,17 +8,17 @@ import ( "os" "os/exec" "strings" - "testing" "github.com/docker/docker/vendor/src/github.com/kr/pty" + "github.com/go-check/check" ) // save a repo and try to load it using stdout -func TestSaveAndLoadRepoStdout(t *testing.T) { +func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to create a container: %s, %v", out, err) + c.Fatalf("failed to create a container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -27,25 +27,25 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { inspectCmd := exec.Command(dockerBinary, "inspect", cleanedContainerID) if out, _, err = runCommandWithOutput(inspectCmd); err != nil { - t.Fatalf("output should've been a container id: %s, %v", out, err) + c.Fatalf("output should've been a container id: %s, %v", out, err) } commitCmd := exec.Command(dockerBinary, "commit", cleanedContainerID, repoName) if out, _, err = runCommandWithOutput(commitCmd); err != nil { - t.Fatalf("failed to commit container: %s, %v", out, err) + c.Fatalf("failed to commit container: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) before, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist before saving it: %s, %v", before, err) + c.Fatalf("the repo should exist before saving it: %s, %v", before, err) } saveCmdTemplate := `%v save %v > /tmp/foobar-save-load-test.tar` saveCmdFinal := fmt.Sprintf(saveCmdTemplate, dockerBinary, repoName) saveCmd := exec.Command("bash", "-c", saveCmdFinal) if out, _, err = runCommandWithOutput(saveCmd); err != nil { - 
t.Fatalf("failed to save repo: %s, %v", out, err) + c.Fatalf("failed to save repo: %s, %v", out, err) } deleteImages(repoName) @@ -53,17 +53,17 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { loadCmdFinal := `cat /tmp/foobar-save-load-test.tar | docker load` loadCmd := exec.Command("bash", "-c", loadCmdFinal) if out, _, err = runCommandWithOutput(loadCmd); err != nil { - t.Fatalf("failed to load repo: %s, %v", out, err) + c.Fatalf("failed to load repo: %s, %v", out, err) } inspectCmd = exec.Command(dockerBinary, "inspect", repoName) after, _, err := runCommandWithOutput(inspectCmd) if err != nil { - t.Fatalf("the repo should exist after loading it: %s %v", after, err) + c.Fatalf("the repo should exist after loading it: %s %v", after, err) } if before != after { - t.Fatalf("inspect is not the same after a save / load") + c.Fatalf("inspect is not the same after a save / load") } deleteContainer(cleanedContainerID) @@ -73,29 +73,28 @@ func TestSaveAndLoadRepoStdout(t *testing.T) { pty, tty, err := pty.Open() if err != nil { - t.Fatalf("Could not open pty: %v", err) + c.Fatalf("Could not open pty: %v", err) } cmd := exec.Command(dockerBinary, "save", repoName) cmd.Stdin = tty cmd.Stdout = tty cmd.Stderr = tty if err := cmd.Start(); err != nil { - t.Fatalf("start err: %v", err) + c.Fatalf("start err: %v", err) } if err := cmd.Wait(); err == nil { - t.Fatal("did not break writing to a TTY") + c.Fatal("did not break writing to a TTY") } buf := make([]byte, 1024) n, err := pty.Read(buf) if err != nil { - t.Fatal("could not read tty output") + c.Fatal("could not read tty output") } if !bytes.Contains(buf[:n], []byte("Cowardly refusing")) { - t.Fatal("help output is not being yielded", out) + c.Fatal("help output is not being yielded", out) } - logDone("save - save/load a repo using stdout") } diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go index a3546103f39e5..c5ecdd03b9c11 100644 --- 
a/integration-cli/docker_cli_search_test.go +++ b/integration-cli/docker_cli_search_test.go @@ -3,45 +3,93 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) // search for repos named "registry" on the central registry -func TestSearchOnCentralRegistry(t *testing.T) { - testRequires(t, Network) +func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { + testRequires(c, Network) searchCmd := exec.Command(dockerBinary, "search", "busybox") out, exitCode, err := runCommandWithOutput(searchCmd) if err != nil || exitCode != 0 { - t.Fatalf("failed to search on the central registry: %s, %v", out, err) + c.Fatalf("failed to search on the central registry: %s, %v", out, err) } if !strings.Contains(out, "Busybox base image.") { - t.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'") + c.Fatal("couldn't find any repository named (or containing) 'Busybox base image.'") } - logDone("search - search for repositories named (or containing) 'Busybox base image.'") } -func TestSearchStarsOptionWithWrongParameter(t *testing.T) { +func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { searchCmdStarsChars := exec.Command(dockerBinary, "search", "--stars=a", "busybox") out, exitCode, err := runCommandWithOutput(searchCmdStarsChars) if err == nil || exitCode == 0 { - t.Fatalf("Should not get right information: %s, %v", out, err) + c.Fatalf("Should not get right information: %s, %v", out, err) } if !strings.Contains(out, "invalid value") { - t.Fatal("couldn't find the invalid value warning") + c.Fatal("couldn't find the invalid value warning") } searchCmdStarsNegativeNumber := exec.Command(dockerBinary, "search", "-s=-1", "busybox") out, exitCode, err = runCommandWithOutput(searchCmdStarsNegativeNumber) if err == nil || exitCode == 0 { - t.Fatalf("Should not get right information: %s, %v", out, err) + c.Fatalf("Should not get right information: %s, %v", out, err) } if !strings.Contains(out, 
"invalid value") { - t.Fatal("couldn't find the invalid value warning") + c.Fatal("couldn't find the invalid value warning") + } + +} + +func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { + testRequires(c, Network) + searchCmdhelp := exec.Command(dockerBinary, "search", "--help") + out, exitCode, err := runCommandWithOutput(searchCmdhelp) + if err != nil || exitCode != 0 { + c.Fatalf("failed to get search help information: %s, %v", out, err) + } + + if !strings.Contains(out, "Usage: docker search [OPTIONS] TERM") { + c.Fatalf("failed to show docker search usage: %s, %v", out, err) + } + + searchCmd := exec.Command(dockerBinary, "search", "busybox") + outSearchCmd, exitCode, err := runCommandWithOutput(searchCmd) + if err != nil || exitCode != 0 { + c.Fatalf("failed to search on the central registry: %s, %v", outSearchCmd, err) + } + + searchCmdautomated := exec.Command(dockerBinary, "search", "--automated=true", "busybox") + outSearchCmdautomated, exitCode, err := runCommandWithOutput(searchCmdautomated) //The busybox is a busybox base image, not an AUTOMATED image. 
+ if err != nil || exitCode != 0 { + c.Fatalf("failed to search with automated=true on the central registry: %s, %v", outSearchCmdautomated, err) + } + + outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") + for i := range outSearchCmdautomatedSlice { + if strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox ") { + c.Fatalf("The busybox is not an AUTOMATED image: %s, %v", out, err) + } + } + + searchCmdStars := exec.Command(dockerBinary, "search", "-s=2", "busybox") + outSearchCmdStars, exitCode, err := runCommandWithOutput(searchCmdStars) + if err != nil || exitCode != 0 { + c.Fatalf("failed to search with stars=2 on the central registry: %s, %v", outSearchCmdStars, err) + } + + if strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]") { + c.Fatalf("The quantity of images with stars should be less than that of all images: %s, %v", outSearchCmdStars, err) + } + + searchCmdOptions := exec.Command(dockerBinary, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") + out, exitCode, err = runCommandWithOutput(searchCmdOptions) + if err != nil || exitCode != 0 { + c.Fatalf("failed to search with stars&automated&no-trunc options on the central registry: %s, %v", out, err) } - logDone("search - Verify search with wrong parameter.") } diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go index 25b23e888f4b1..fddc8c97bbf0d 100644 --- a/integration-cli/docker_cli_start_test.go +++ b/integration-cli/docker_cli_start_test.go @@ -4,49 +4,48 @@ import ( "fmt" "os/exec" "strings" - "testing" "time" + + "github.com/go-check/check" ) // Regression test for https://github.com/docker/docker/issues/7843 -func TestStartAttachReturnsOnError(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { - dockerCmd(t, "run", "-d", "--name", "test", "busybox") - dockerCmd(t, "wait", "test") + dockerCmd(c, "run", "-d", "--name", 
"test", "busybox") + dockerCmd(c, "wait", "test") // Expect this to fail because the above container is stopped, this is what we want if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "--link", "test:test", "busybox")); err == nil { - t.Fatal("Expected error but got none") + c.Fatal("Expected error but got none") } - ch := make(chan struct{}) + ch := make(chan error) go func() { // Attempt to start attached to the container that won't start // This should return an error immediately since the container can't be started if _, err := runCommand(exec.Command(dockerBinary, "start", "-a", "test2")); err == nil { - t.Fatal("Expected error but got none") + ch <- fmt.Errorf("Expected error but got none") } close(ch) }() select { - case <-ch: + case err := <-ch: + c.Assert(err, check.IsNil) case <-time.After(time.Second): - t.Fatalf("Attach did not exit properly") + c.Fatalf("Attach did not exit properly") } - logDone("start - error on start with attach exits") } // gh#8555: Exit code should be passed through when using start -a -func TestStartAttachCorrectExitCode(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } out = strings.TrimSpace(out) @@ -54,203 +53,191 @@ func TestStartAttachCorrectExitCode(t *testing.T) { // make sure the container has exited before trying the "start -a" waitCmd := exec.Command(dockerBinary, "wait", out) if _, _, err = runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("Failed to wait on container: %v", err) + c.Fatalf("Failed to wait on container: %v", err) } startCmd := exec.Command(dockerBinary, "start", "-a", out) startOut, exitCode, err := 
runCommandWithOutput(startCmd) if err != nil && !strings.Contains("exit status 1", fmt.Sprintf("%s", err)) { - t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) + c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) } if exitCode != 1 { - t.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) + c.Fatalf("start -a did not respond with proper exit code: expected 1, got %d", exitCode) } - logDone("start - correct exit code returned with -a") } -func TestStartSilentAttach(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartAttachSilent(c *check.C) { name := "teststartattachcorrectexitcode" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "echo", "test") out, _, _, err := runCommandWithStdoutStderr(runCmd) if err != nil { - t.Fatalf("failed to run container: %v, output: %q", err, out) + c.Fatalf("failed to run container: %v, output: %q", err, out) } // make sure the container has exited before trying the "start -a" waitCmd := exec.Command(dockerBinary, "wait", name) if _, _, err = runCommandWithOutput(waitCmd); err != nil { - t.Fatalf("wait command failed with error: %v", err) + c.Fatalf("wait command failed with error: %v", err) } startCmd := exec.Command(dockerBinary, "start", "-a", name) startOut, _, err := runCommandWithOutput(startCmd) if err != nil { - t.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) + c.Fatalf("start command failed unexpectedly with error: %v, output: %q", err, startOut) } if expected := "test\n"; startOut != expected { - t.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut) + c.Fatalf("start -a produced unexpected output: expected %q, got %q", expected, startOut) } - logDone("start - don't echo container ID when attaching") } -func TestStartRecordError(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) 
TestStartRecordError(c *check.C) { // when container runs successfully, we should not have state.Error - dockerCmd(t, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") + dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top") stateErr, err := inspectField("test", "State.Error") if err != nil { - t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + c.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) } if stateErr != "" { - t.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) + c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) } // Expect this to fail and records error because of ports conflict out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")) if err == nil { - t.Fatalf("Expected error but got none, output %q", out) + c.Fatalf("Expected error but got none, output %q", out) } stateErr, err = inspectField("test2", "State.Error") if err != nil { - t.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err) + c.Fatalf("Failed to inspect %q state's error, got error %q", "test2", err) } expected := "port is already allocated" if stateErr == "" || !strings.Contains(stateErr, expected) { - t.Fatalf("State.Error(%q) does not include %q", stateErr, expected) + c.Fatalf("State.Error(%q) does not include %q", stateErr, expected) } // Expect the conflict to be resolved when we stop the initial container - dockerCmd(t, "stop", "test") - dockerCmd(t, "start", "test2") + dockerCmd(c, "stop", "test") + dockerCmd(c, "start", "test2") stateErr, err = inspectField("test2", "State.Error") if err != nil { - t.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) + c.Fatalf("Failed to inspect %q state's error, got error %q", "test", err) } if stateErr != "" { - t.Fatalf("Expected to not have state error but got state.Error(%q)", 
stateErr) + c.Fatalf("Expected to not have state error but got state.Error(%q)", stateErr) } - logDone("start - set state error when start is unsuccessful") } // gh#8726: a failed Start() breaks --volumes-from on subsequent Start()'s -func TestStartVolumesFromFailsCleanly(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartVolumesFromFailsCleanly(c *check.C) { // Create the first data volume - dockerCmd(t, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") + dockerCmd(c, "run", "-d", "--name", "data_before", "-v", "/foo", "busybox") // Expect this to fail because the data test after contaienr doesn't exist yet if _, err := runCommand(exec.Command(dockerBinary, "run", "-d", "--name", "consumer", "--volumes-from", "data_before", "--volumes-from", "data_after", "busybox")); err == nil { - t.Fatal("Expected error but got none") + c.Fatal("Expected error but got none") } // Create the second data volume - dockerCmd(t, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") + dockerCmd(c, "run", "-d", "--name", "data_after", "-v", "/bar", "busybox") // Now, all the volumes should be there - dockerCmd(t, "start", "consumer") + dockerCmd(c, "start", "consumer") // Check that we have the volumes we want - out, _, _ := dockerCmd(t, "inspect", "--format='{{ len .Volumes }}'", "consumer") + out, _ := dockerCmd(c, "inspect", "--format='{{ len .Volumes }}'", "consumer") nVolumes := strings.Trim(out, " \r\n'") if nVolumes != "2" { - t.Fatalf("Missing volumes: expected 2, got %s", nVolumes) + c.Fatalf("Missing volumes: expected 2, got %s", nVolumes) } - logDone("start - missing containers in --volumes-from did not affect subsequent runs") } -func TestStartPausedContainer(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartPausedContainer(c *check.C) { defer unpauseAllContainers() runCmd := exec.Command(dockerBinary, "run", "-d", "--name", "testing", "busybox", "top") if out, _, err := 
runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "pause", "testing") if out, _, err := runCommandWithOutput(runCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } runCmd = exec.Command(dockerBinary, "start", "testing") if out, _, err := runCommandWithOutput(runCmd); err == nil || !strings.Contains(out, "Cannot start a paused container, try unpause instead.") { - t.Fatalf("an error should have been shown that you cannot start paused container: %s\n%v", out, err) + c.Fatalf("an error should have been shown that you cannot start paused container: %s\n%v", out, err) } - logDone("start - error should show if trying to start paused container") } -func TestStartMultipleContainers(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestStartMultipleContainers(c *check.C) { // run a container named 'parent' and create two container link to `parent` cmd := exec.Command(dockerBinary, "run", "-d", "--name", "parent", "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } for _, container := range []string{"child_first", "child_second"} { cmd = exec.Command(dockerBinary, "create", "--name", container, "--link", "parent:parent", "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } } // stop 'parent' container cmd = exec.Command(dockerBinary, "stop", "parent") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", "parent") out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != "false" { - t.Fatal("Container should be stopped") + c.Fatal("Container should be stopped") } - // start all the three containers, container `child_first` start first which should be 
faild + // start all the three containers, container `child_first` start first which should be failed // container 'parent' start second and then start container 'child_second' cmd = exec.Command(dockerBinary, "start", "child_first", "parent", "child_second") out, _, err = runCommandWithOutput(cmd) if !strings.Contains(out, "Cannot start container child_first") || err == nil { - t.Fatal("Expected error but got none") + c.Fatal("Expected error but got none") } for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} { cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container) out, _, err = runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != expected { - t.Fatal("Container running state wrong") + c.Fatal("Container running state wrong") } } - logDone("start - start multiple containers continue on one failed") } -func TestStartAttachMultipleContainers(t *testing.T) { +func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) { var cmd *exec.Cmd - defer deleteAllContainers() // run multiple containers to test for _, container := range []string{"test1", "test2", "test3"} { cmd = exec.Command(dockerBinary, "run", "-d", "--name", container, "busybox", "top") if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } } @@ -258,7 +245,7 @@ func TestStartAttachMultipleContainers(t *testing.T) { for _, container := range []string{"test1", "test2", "test3"} { cmd = exec.Command(dockerBinary, "stop", container) if out, _, err := runCommandWithOutput(cmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } } @@ -267,7 +254,7 @@ func TestStartAttachMultipleContainers(t *testing.T) { cmd = exec.Command(dockerBinary, "start", option, "test1", "test2", "test3") out, _, err := runCommandWithOutput(cmd) if !strings.Contains(out, "You cannot start and attach multiple 
containers at once.") || err == nil { - t.Fatal("Expected error but got none") + c.Fatal("Expected error but got none") } } @@ -276,13 +263,12 @@ func TestStartAttachMultipleContainers(t *testing.T) { cmd = exec.Command(dockerBinary, "inspect", "-f", "{{.State.Running}}", container) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } out = strings.Trim(out, "\r\n") if out != expected { - t.Fatal("Container running state wrong") + c.Fatal("Container running state wrong") } } - logDone("start - error on start and attach multiple containers at once") } diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go index 8a5d322713534..35225f9c1e5ec 100644 --- a/integration-cli/docker_cli_tag_test.go +++ b/integration-cli/docker_cli_tag_test.go @@ -3,48 +3,40 @@ package main import ( "os/exec" "strings" - "testing" "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" ) // tagging a named image in a new unprefixed repo should work -func TestTagUnprefixedRepoByName(t *testing.T) { +func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { if err := pullImageIfNotExist("busybox:latest"); err != nil { - t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "testfoobarbaz") if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - deleteImages("testfoobarbaz") - - logDone("tag - busybox -> testfoobarbaz") } // tagging an image by ID in a new unprefixed repo should work -func TestTagUnprefixedRepoByID(t *testing.T) { +func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { getIDCmd := exec.Command(dockerBinary, "inspect", "-f", "{{.Id}}", "busybox") out, _, err := runCommandWithOutput(getIDCmd) if err != nil { - t.Fatalf("failed to get the image ID of 
busybox: %s, %v", out, err) + c.Fatalf("failed to get the image ID of busybox: %s, %v", out, err) } cleanedImageID := strings.TrimSpace(out) tagCmd := exec.Command(dockerBinary, "tag", cleanedImageID, "testfoobarbaz") if out, _, err = runCommandWithOutput(tagCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - - deleteImages("testfoobarbaz") - - logDone("tag - busybox's image ID -> testfoobarbaz") } // ensure we don't allow the use of invalid repository names; these tag operations should fail -func TestTagInvalidUnprefixedRepo(t *testing.T) { +func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd"} @@ -52,14 +44,13 @@ func TestTagInvalidUnprefixedRepo(t *testing.T) { tagCmd := exec.Command(dockerBinary, "tag", "busybox", repo) _, _, err := runCommandWithOutput(tagCmd) if err == nil { - t.Fatalf("tag busybox %v should have failed", repo) + c.Fatalf("tag busybox %v should have failed", repo) } } - logDone("tag - busybox invalid repo names --> must not work") } // ensure we don't allow the use of invalid tags; these tag operations should fail -func TestTagInvalidPrefixedRepo(t *testing.T) { +func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { longTag := stringutils.GenerateRandomAlphaOnlyString(121) invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} @@ -68,16 +59,15 @@ func TestTagInvalidPrefixedRepo(t *testing.T) { tagCmd := exec.Command(dockerBinary, "tag", "busybox", repotag) _, _, err := runCommandWithOutput(tagCmd) if err == nil { - t.Fatalf("tag busybox %v should have failed", repotag) + c.Fatalf("tag busybox %v should have failed", repotag) } } - logDone("tag - busybox with invalid repo:tagnames --> must not work") } // ensure we allow the use of valid tags -func TestTagValidPrefixedRepo(t 
*testing.T) { +func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { if err := pullImageIfNotExist("busybox:latest"); err != nil { - t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t"} @@ -86,56 +76,49 @@ func TestTagValidPrefixedRepo(t *testing.T) { tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", repo) _, _, err := runCommandWithOutput(tagCmd) if err != nil { - t.Errorf("tag busybox %v should have worked: %s", repo, err) + c.Errorf("tag busybox %v should have worked: %s", repo, err) continue } deleteImages(repo) } - logDone("tag - tag valid prefixed repo") } // tag an image with an existed tag name without -f option should fail -func TestTagExistedNameWithoutForce(t *testing.T) { +func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { if err := pullImageIfNotExist("busybox:latest"); err != nil { - t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } tagCmd = exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") out, _, err := runCommandWithOutput(tagCmd) if err == nil || !strings.Contains(out, "Conflict: Tag test is already set to image") { - t.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed") + c.Fatal("tag busybox busybox:test should have failed,because busybox:test is existed") } - deleteImages("busybox:test") - - logDone("tag - busybox with an existed tag name without -f option --> must not work") } // tag an image with an existed tag name with -f option should work -func TestTagExistedNameWithForce(t *testing.T) { +func (s 
*DockerSuite) TestTagExistedNameWithForce(c *check.C) { if err := pullImageIfNotExist("busybox:latest"); err != nil { - t.Fatal("couldn't find the busybox:latest image locally and failed to pull it") + c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") } tagCmd := exec.Command(dockerBinary, "tag", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } tagCmd = exec.Command(dockerBinary, "tag", "-f", "busybox:latest", "busybox:test") if out, _, err := runCommandWithOutput(tagCmd); err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } - deleteImages("busybox:test") - - logDone("tag - busybox with an existed tag name with -f option work") } // ensure tagging using official names works // ensure all tags result in the same name -func TestTagOfficialNames(t *testing.T) { +func (s *DockerSuite) TestTagOfficialNames(c *check.C) { names := []string{ "docker.io/busybox", "index.docker.io/busybox", @@ -148,7 +131,7 @@ func TestTagOfficialNames(t *testing.T) { tagCmd := exec.Command(dockerBinary, "tag", "-f", "busybox:latest", name+":latest") out, exitCode, err := runCommandWithOutput(tagCmd) if err != nil || exitCode != 0 { - t.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) + c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out) continue } @@ -156,9 +139,9 @@ func TestTagOfficialNames(t *testing.T) { imagesCmd := exec.Command(dockerBinary, "images") out, _, err = runCommandWithOutput(imagesCmd) if err != nil { - t.Errorf("listing images failed with errors: %v, %s", err, out) + c.Errorf("listing images failed with errors: %v, %s", err, out) } else if strings.Contains(out, name) { - t.Errorf("images should not have listed '%s'", name) + c.Errorf("images should not have listed '%s'", name) deleteImages(name + ":latest") } } @@ -167,10 +150,9 @@ func TestTagOfficialNames(t *testing.T) { tagCmd := exec.Command(dockerBinary, "tag", 
"-f", name+":latest", "fooo/bar:latest") _, exitCode, err := runCommandWithOutput(tagCmd) if err != nil || exitCode != 0 { - t.Errorf("tag %v fooo/bar should have worked: %s", name, err) + c.Errorf("tag %v fooo/bar should have worked: %s", name, err) continue } deleteImages("fooo/bar:latest") } - logDone("tag - tag official names") } diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go index b5dca0be983fd..f941a42cd0515 100644 --- a/integration-cli/docker_cli_top_test.go +++ b/integration-cli/docker_cli_top_test.go @@ -3,37 +3,36 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) -func TestTopMultipleArgs(t *testing.T) { +func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to start the container: %s, %v", out, err) + c.Fatalf("failed to start the container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) - defer deleteContainer(cleanedContainerID) topCmd := exec.Command(dockerBinary, "top", cleanedContainerID, "-o", "pid") out, _, err = runCommandWithOutput(topCmd) if err != nil { - t.Fatalf("failed to run top: %s, %v", out, err) + c.Fatalf("failed to run top: %s, %v", out, err) } if !strings.Contains(out, "PID") { - t.Fatalf("did not see PID after top -o pid: %s", out) + c.Fatalf("did not see PID after top -o pid: %s", out) } - logDone("top - multiple arguments") } -func TestTopNonPrivileged(t *testing.T) { +func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-i", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to start the container: %s, %v", out, err) + c.Fatalf("failed to start the container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -41,38 +40,37 @@ func TestTopNonPrivileged(t 
*testing.T) { topCmd := exec.Command(dockerBinary, "top", cleanedContainerID) out1, _, err := runCommandWithOutput(topCmd) if err != nil { - t.Fatalf("failed to run top: %s, %v", out1, err) + c.Fatalf("failed to run top: %s, %v", out1, err) } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) out2, _, err := runCommandWithOutput(topCmd) if err != nil { - t.Fatalf("failed to run top: %s, %v", out2, err) + c.Fatalf("failed to run top: %s, %v", out2, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { - t.Fatalf("failed to kill container: %s, %v", out, err) + c.Fatalf("failed to kill container: %s, %v", out, err) } deleteContainer(cleanedContainerID) if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") { - t.Fatal("top should've listed `top` in the process list, but failed twice") + c.Fatal("top should've listed `top` in the process list, but failed twice") } else if !strings.Contains(out1, "top") { - t.Fatal("top should've listed `top` in the process list, but failed the first time") + c.Fatal("top should've listed `top` in the process list, but failed the first time") } else if !strings.Contains(out2, "top") { - t.Fatal("top should've listed `top` in the process list, but failed the second itime") + c.Fatal("top should've listed `top` in the process list, but failed the second time") } - logDone("top - top process should be listed in non privileged mode") } -func TestTopPrivileged(t *testing.T) { +func (s *DockerSuite) TestTopPrivileged(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "--privileged", "-i", "-d", "busybox", "top") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatalf("failed to start the container: %s, %v", out, err) + c.Fatalf("failed to start the container: %s, %v", out, err) } cleanedContainerID := strings.TrimSpace(out) @@ -80,29 +78,28 @@ func TestTopPrivileged(t *testing.T) { topCmd := 
exec.Command(dockerBinary, "top", cleanedContainerID) out1, _, err := runCommandWithOutput(topCmd) if err != nil { - t.Fatalf("failed to run top: %s, %v", out1, err) + c.Fatalf("failed to run top: %s, %v", out1, err) } topCmd = exec.Command(dockerBinary, "top", cleanedContainerID) out2, _, err := runCommandWithOutput(topCmd) if err != nil { - t.Fatalf("failed to run top: %s, %v", out2, err) + c.Fatalf("failed to run top: %s, %v", out2, err) } killCmd := exec.Command(dockerBinary, "kill", cleanedContainerID) if out, _, err = runCommandWithOutput(killCmd); err != nil { - t.Fatalf("failed to kill container: %s, %v", out, err) + c.Fatalf("failed to kill container: %s, %v", out, err) } deleteContainer(cleanedContainerID) if !strings.Contains(out1, "top") && !strings.Contains(out2, "top") { - t.Fatal("top should've listed `top` in the process list, but failed twice") + c.Fatal("top should've listed `top` in the process list, but failed twice") } else if !strings.Contains(out1, "top") { - t.Fatal("top should've listed `top` in the process list, but failed the first time") + c.Fatal("top should've listed `top` in the process list, but failed the first time") } else if !strings.Contains(out2, "top") { - t.Fatal("top should've listed `top` in the process list, but failed the second itime") + c.Fatal("top should've listed `top` in the process list, but failed the second time") } - logDone("top - top process should be listed in privileged mode") } diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go index ceaeba8e209ff..3616da988f0a7 100644 --- a/integration-cli/docker_cli_version_test.go +++ b/integration-cli/docker_cli_version_test.go @@ -3,15 +3,16 @@ package main import ( "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) // ensure docker version works -func TestVersionEnsureSucceeds(t *testing.T) { +func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) { versionCmd := exec.Command(dockerBinary, 
"version") out, _, err := runCommandWithOutput(versionCmd) if err != nil { - t.Fatalf("failed to execute docker version: %s, %v", out, err) + c.Fatalf("failed to execute docker version: %s, %v", out, err) } stringsToCheck := []string{ @@ -29,9 +30,8 @@ func TestVersionEnsureSucceeds(t *testing.T) { for _, linePrefix := range stringsToCheck { if !strings.Contains(out, linePrefix) { - t.Errorf("couldn't find string %v in output", linePrefix) + c.Errorf("couldn't find string %v in output", linePrefix) } } - logDone("version - verify that it works and that the output is properly formatted") } diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go index cc0e778ea3ec6..21f04faf0f8cd 100644 --- a/integration-cli/docker_cli_wait_test.go +++ b/integration-cli/docker_cli_wait_test.go @@ -3,18 +3,18 @@ package main import ( "os/exec" "strings" - "testing" "time" + + "github.com/go-check/check" ) // non-blocking wait with 0 exit code -func TestWaitNonBlockedExitZero(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "true") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerID := strings.TrimSpace(out) @@ -23,13 +23,13 @@ func TestWaitNonBlockedExitZero(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.Running}}'", containerID) status, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(status, err) + c.Fatal(status, err) } status = strings.TrimSpace(status) time.Sleep(time.Second) if i >= 60 { - t.Fatal("Container should have stopped by now") + c.Fatal("Container should have stopped by now") } } @@ -37,41 +37,47 @@ func TestWaitNonBlockedExitZero(t *testing.T) { out, _, err = runCommandWithOutput(runCmd) if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + 
c.Fatal("failed to set up container", out, err) } - logDone("wait - non-blocking wait with 0 exit code") } // blocking wait with 0 exit code -func TestWaitBlockedExitZero(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' SIGTERM; while true; do sleep 0.01; done") + containerID := strings.TrimSpace(out) - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10") - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - t.Fatal(out, err) + if err := waitRun(containerID); err != nil { + c.Fatal(err) } - containerID := strings.TrimSpace(out) - runCmd = exec.Command(dockerBinary, "wait", containerID) - out, _, err = runCommandWithOutput(runCmd) + chWait := make(chan string) + go func() { + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() - if err != nil || strings.TrimSpace(out) != "0" { - t.Fatal("failed to set up container", out, err) + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) + + select { + case status := <-chWait: + if strings.TrimSpace(status) != "0" { + c.Fatalf("expected exit 0, got %s", status) + } + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") } - logDone("wait - blocking wait with 0 exit code") } // non-blocking wait with random exit code -func TestWaitNonBlockedExitRandom(t *testing.T) { - defer deleteAllContainers() +func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "exit 99") out, _, err := runCommandWithOutput(runCmd) if err != nil { - t.Fatal(out, err) + c.Fatal(out, err) } containerID := strings.TrimSpace(out) @@ -80,13 +86,13 @@ func TestWaitNonBlockedExitRandom(t *testing.T) { runCmd = exec.Command(dockerBinary, "inspect", "--format='{{.State.Running}}'", 
containerID) status, _, err = runCommandWithOutput(runCmd) if err != nil { - t.Fatal(status, err) + c.Fatal(status, err) } status = strings.TrimSpace(status) time.Sleep(time.Second) if i >= 60 { - t.Fatal("Container should have stopped by now") + c.Fatal("Container should have stopped by now") } } @@ -94,29 +100,37 @@ func TestWaitNonBlockedExitRandom(t *testing.T) { out, _, err = runCommandWithOutput(runCmd) if err != nil || strings.TrimSpace(out) != "99" { - t.Fatal("failed to set up container", out, err) + c.Fatal("failed to set up container", out, err) } - logDone("wait - non-blocking wait with random exit code") } // blocking wait with random exit code -func TestWaitBlockedExitRandom(t *testing.T) { - defer deleteAllContainers() - - runCmd := exec.Command(dockerBinary, "run", "-d", "busybox", "sh", "-c", "sleep 10; exit 99") - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - t.Fatal(out, err) - } +func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { + out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "trap 'exit 99' SIGTERM; while true; do sleep 0.01; done") containerID := strings.TrimSpace(out) + if err := waitRun(containerID); err != nil { + c.Fatal(err) + } + if err := waitRun(containerID); err != nil { + c.Fatal(err) + } - runCmd = exec.Command(dockerBinary, "wait", containerID) - out, _, err = runCommandWithOutput(runCmd) + chWait := make(chan string) + go func() { + out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) + chWait <- out + }() - if err != nil || strings.TrimSpace(out) != "99" { - t.Fatal("failed to set up container", out, err) - } + time.Sleep(100 * time.Millisecond) + dockerCmd(c, "stop", containerID) - logDone("wait - blocking wait with random exit code") + select { + case status := <-chWait: + if strings.TrimSpace(status) != "99" { + c.Fatalf("expected exit 99, got %s", status) + } + case <-time.After(2 * time.Second): + c.Fatal("timeout waiting for `docker wait` to exit") + } 
} diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go index 843a07a20ef7f..8386bb59ff962 100644 --- a/integration-cli/docker_utils.go +++ b/integration-cli/docker_utils.go @@ -18,16 +18,17 @@ import ( "path/filepath" "strconv" "strings" - "testing" "time" - "github.com/docker/docker/api" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/stringutils" + "github.com/go-check/check" ) // Daemon represents a Docker daemon for the testing framework. type Daemon struct { - t *testing.T + c *check.C logFile *os.File folder string stdin io.WriteCloser @@ -41,24 +42,24 @@ type Daemon struct { // NewDaemon returns a Daemon instance to be used for testing. // This will create a directory such as daemon123456789 in the folder specified by $DEST. // The daemon will not automatically start. -func NewDaemon(t *testing.T) *Daemon { +func NewDaemon(c *check.C) *Daemon { dest := os.Getenv("DEST") if dest == "" { - t.Fatal("Please set the DEST environment variable") + c.Fatal("Please set the DEST environment variable") } dir := filepath.Join(dest, fmt.Sprintf("daemon%d", time.Now().UnixNano()%100000000)) daemonFolder, err := filepath.Abs(dir) if err != nil { - t.Fatalf("Could not make %q an absolute path: %v", dir, err) + c.Fatalf("Could not make %q an absolute path: %v", dir, err) } if err := os.MkdirAll(filepath.Join(daemonFolder, "graph"), 0600); err != nil { - t.Fatalf("Could not create %s/graph directory", daemonFolder) + c.Fatalf("Could not create %s/graph directory", daemonFolder) } return &Daemon{ - t: t, + c: c, folder: daemonFolder, storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"), execDriver: os.Getenv("DOCKER_EXECDRIVER"), @@ -70,7 +71,7 @@ func NewDaemon(t *testing.T) *Daemon { func (d *Daemon) Start(arg ...string) error { dockerBinary, err := exec.LookPath(dockerBinary) if err != nil { - d.t.Fatalf("could not find docker binary in $PATH: %v", err) + d.c.Fatalf("could not find docker binary in 
$PATH: %v", err) } args := []string{ @@ -104,7 +105,7 @@ func (d *Daemon) Start(arg ...string) error { d.logFile, err = os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) if err != nil { - d.t.Fatalf("Could not create %s/docker.log: %v", d.folder, err) + d.c.Fatalf("Could not create %s/docker.log: %v", d.folder, err) } d.cmd.Stdout = d.logFile @@ -118,7 +119,7 @@ func (d *Daemon) Start(arg ...string) error { go func() { wait <- d.cmd.Wait() - d.t.Log("exiting daemon") + d.c.Log("exiting daemon") close(wait) }() @@ -128,7 +129,7 @@ func (d *Daemon) Start(arg ...string) error { // make sure daemon is ready to receive requests startTime := time.Now().Unix() for { - d.t.Log("waiting for daemon to start") + d.c.Log("waiting for daemon to start") if time.Now().Unix()-startTime > 5 { // After 5 seconds, give up return errors.New("Daemon exited and never started") @@ -147,7 +148,7 @@ func (d *Daemon) Start(arg ...string) error { req, err := http.NewRequest("GET", "/_ping", nil) if err != nil { - d.t.Fatalf("could not create new request: %v", err) + d.c.Fatalf("could not create new request: %v", err) } resp, err := client.Do(req) @@ -155,10 +156,10 @@ func (d *Daemon) Start(arg ...string) error { continue } if resp.StatusCode != http.StatusOK { - d.t.Logf("received status != 200 OK: %s", resp.Status) + d.c.Logf("received status != 200 OK: %s", resp.Status) } - d.t.Log("daemon started") + d.c.Log("daemon started") return nil } } @@ -185,7 +186,7 @@ func (d *Daemon) StartWithBusybox(arg ...string) error { return fmt.Errorf("could not load busybox image: %v", err) } if err := os.Remove(bb); err != nil { - d.t.Logf("Could not remove %s: %v", bb, err) + d.c.Logf("Could not remove %s: %v", bb, err) } return nil } @@ -217,7 +218,7 @@ out1: return err case <-time.After(15 * time.Second): // time for stopping jobs and run onShutdown hooks - d.t.Log("timeout") + d.c.Log("timeout") break out1 } } @@ -230,10 +231,10 @@ out2: case <-tick: i++ if 
i > 4 { - d.t.Logf("tried to interrupt daemon for %d times, now try to kill it", i) + d.c.Logf("tried to interrupt daemon for %d times, now try to kill it", i) break out2 } - d.t.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) + d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid) if err := d.cmd.Process.Signal(os.Interrupt); err != nil { return fmt.Errorf("could not send signal: %v", err) } @@ -241,7 +242,7 @@ out2: } if err := d.cmd.Process.Kill(); err != nil { - d.t.Logf("Could not kill daemon: %v", err) + d.c.Logf("Could not kill daemon: %v", err) return err } @@ -268,12 +269,20 @@ func (d *Daemon) Cmd(name string, arg ...string) (string, error) { return string(b), err } +func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (string, error) { + args := append(daemonArgs, name) + args = append(args, arg...) + c := exec.Command(dockerBinary, args...) + b, err := c.CombinedOutput() + return string(b), err +} + func (d *Daemon) LogfileName() string { return d.logFile.Name() } func daemonHost() string { - daemonUrlStr := "unix://" + api.DEFAULTUNIXSOCKET + daemonUrlStr := "unix://" + opts.DefaultUnixSocket if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { daemonUrlStr = daemonHostVar } @@ -298,52 +307,61 @@ func sockConn(timeout time.Duration) (net.Conn, error) { } } -func sockRequest(method, endpoint string, data interface{}) ([]byte, error) { +func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { jsonData := bytes.NewBuffer(nil) if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return nil, err + return -1, nil, err } - return sockRequestRaw(method, endpoint, jsonData, "application/json") + res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") + if err != nil { + b, _ := ioutil.ReadAll(body) + return -1, b, err + } + var b []byte + b, err = readBody(body) + return res.StatusCode, b, err } -func 
sockRequestRaw(method, endpoint string, data io.Reader, ct string) ([]byte, error) { +func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { c, err := sockConn(time.Duration(10 * time.Second)) if err != nil { - return nil, fmt.Errorf("could not dial docker daemon: %v", err) + return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) } client := httputil.NewClientConn(c, nil) - defer client.Close() req, err := http.NewRequest(method, endpoint, data) if err != nil { - return nil, fmt.Errorf("could not create new request: %v", err) + client.Close() + return nil, nil, fmt.Errorf("could not create new request: %v", err) } - if ct == "" { - ct = "application/json" + if ct != "" { + req.Header.Set("Content-Type", ct) } - req.Header.Set("Content-Type", ct) resp, err := client.Do(req) if err != nil { - return nil, fmt.Errorf("could not perform request: %v", err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - body, _ := ioutil.ReadAll(resp.Body) - return body, fmt.Errorf("received status != 200 OK: %s", resp.Status) + client.Close() + return nil, nil, fmt.Errorf("could not perform request: %v", err) } + body := ioutils.NewReadCloserWrapper(resp.Body, func() error { + defer client.Close() + return resp.Body.Close() + }) + + return resp, body, nil +} - return ioutil.ReadAll(resp.Body) +func readBody(b io.ReadCloser) ([]byte, error) { + defer b.Close() + return ioutil.ReadAll(b) } func deleteContainer(container string) error { container = strings.TrimSpace(strings.Replace(container, "\n", " ", -1)) - killArgs := strings.Split(fmt.Sprintf("kill %v", container), " ") - runCommand(exec.Command(dockerBinary, killArgs...)) - rmArgs := strings.Split(fmt.Sprintf("rm -v %v", container), " ") + rmArgs := strings.Split(fmt.Sprintf("rm -fv %v", container), " ") exitCode, err := runCommand(exec.Command(dockerBinary, rmArgs...)) // set error manually if not set if exitCode != 0 && err == nil { @@ 
-376,6 +394,58 @@ func deleteAllContainers() error { return nil } +var protectedImages = map[string]struct{}{} + +func init() { + out, err := exec.Command(dockerBinary, "images").CombinedOutput() + if err != nil { + panic(err) + } + lines := strings.Split(string(out), "\n")[1:] + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + // just in case we have dangling images in the tested daemon + if imgTag != ":" { + protectedImages[imgTag] = struct{}{} + } + } +} + +func deleteAllImages() error { + out, err := exec.Command(dockerBinary, "images").CombinedOutput() + if err != nil { + return err + } + lines := strings.Split(string(out), "\n")[1:] + var imgs []string + for _, l := range lines { + if l == "" { + continue + } + fields := strings.Fields(l) + imgTag := fields[0] + ":" + fields[1] + if _, ok := protectedImages[imgTag]; !ok { + if fields[0] == "" { + imgs = append(imgs, fields[2]) + continue + } + imgs = append(imgs, imgTag) + } + } + if len(imgs) == 0 { + return nil + } + args := append([]string{"rmi", "-f"}, imgs...) + if err := exec.Command(dockerBinary, args...).Run(); err != nil { + return err + } + return nil +} + func getPausedContainers() (string, error) { getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) @@ -389,6 +459,9 @@ func getPausedContainers() (string, error) { func getSliceOfPausedContainers() ([]string, error) { out, err := getPausedContainers() if err == nil { + if len(out) == 0 { + return nil, err + } slice := strings.Split(strings.TrimSpace(out), "\n") return slice, err } @@ -426,8 +499,7 @@ func unpauseAllContainers() error { } func deleteImages(images ...string) error { - args := make([]string, 1, 2) - args[0] = "rmi" + args := []string{"rmi", "-f"} args = append(args, images...) rmiCmd := exec.Command(dockerBinary, args...) 
exitCode, err := runCommand(rmiCmd) @@ -435,7 +507,6 @@ func deleteImages(images ...string) error { if exitCode != 0 && err == nil { err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") } - return err } @@ -460,12 +531,12 @@ func pullImageIfNotExist(image string) (err error) { return } -func dockerCmd(t *testing.T, args ...string) (string, int, error) { +func dockerCmd(c *check.C, args ...string) (string, int) { out, status, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) if err != nil { - t.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err) + c.Fatalf("%q failed with errors: %s, %v", strings.Join(args, " "), out, err) } - return out, status, err + return out, status } // execute a docker command with a timeout @@ -478,7 +549,7 @@ func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, e } // execute a docker command in a directory -func dockerCmdInDir(t *testing.T, path string, args ...string) (string, int, error) { +func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { dockerCommand := exec.Command(dockerBinary, args...) 
dockerCommand.Dir = path out, status, err := runCommandWithOutput(dockerCommand) @@ -499,11 +570,11 @@ func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...strin return out, status, err } -func findContainerIP(t *testing.T, id string) string { +func findContainerIP(c *check.C, id string) string { cmd := exec.Command(dockerBinary, "inspect", "--format='{{ .NetworkSettings.IPAddress }}'", id) out, _, err := runCommandWithOutput(cmd) if err != nil { - t.Fatal(err, out) + c.Fatal(err, out) } return strings.Trim(out, " \r\n'") @@ -761,14 +832,14 @@ func getIDByName(name string) (string, error) { // getContainerState returns the exit code of the container // and true if it's running // the exit code should be ignored if it's running -func getContainerState(t *testing.T, id string) (int, bool, error) { +func getContainerState(c *check.C, id string) (int, bool, error) { var ( exitStatus int running bool ) - out, exitCode, err := dockerCmd(t, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) - if err != nil || exitCode != 0 { - return 0, false, fmt.Errorf("%q doesn't exist: %s", id, err) + out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) + if exitCode != 0 { + return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) } out = strings.Trim(out, "\n") @@ -967,28 +1038,28 @@ func fakeGIT(name string, files map[string]string, enforceLocalServer bool) (*Fa // Write `content` to the file at path `dst`, creating it if necessary, // as well as any missing directories. // The file is truncated if it already exists. -// Call t.Fatal() at the first error. -func writeFile(dst, content string, t *testing.T) { +// Call c.Fatal() at the first error. 
+func writeFile(dst, content string, c *check.C) { // Create subdirectories if necessary if err := os.MkdirAll(path.Dir(dst), 0700); err != nil && !os.IsExist(err) { - t.Fatal(err) + c.Fatal(err) } f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700) if err != nil { - t.Fatal(err) + c.Fatal(err) } // Write content (truncate if it exists) if _, err := io.Copy(f, strings.NewReader(content)); err != nil { - t.Fatal(err) + c.Fatal(err) } } // Return the contents of file at path `src`. -// Call t.Fatal() at the first error (including if the file doesn't exist) -func readFile(src string, t *testing.T) (content string) { +// Call c.Fatal() at the first error (including if the file doesn't exist) +func readFile(src string, c *check.C) (content string) { data, err := ioutil.ReadFile(src) if err != nil { - t.Fatal(err) + c.Fatal(err) } return string(data) @@ -1033,36 +1104,35 @@ func readContainerFileWithExec(containerId, filename string) ([]byte, error) { } // daemonTime provides the current time on the daemon host -func daemonTime(t *testing.T) time.Time { +func daemonTime(c *check.C) time.Time { if isLocalDaemon { return time.Now() } - body, err := sockRequest("GET", "/info", nil) - if err != nil { - t.Fatalf("daemonTime: failed to get /info: %v", err) - } + status, body, err := sockRequest("GET", "/info", nil) + c.Assert(status, check.Equals, http.StatusOK) + c.Assert(err, check.IsNil) type infoJSON struct { SystemTime string } var info infoJSON if err = json.Unmarshal(body, &info); err != nil { - t.Fatalf("unable to unmarshal /info response: %v", err) + c.Fatalf("unable to unmarshal /info response: %v", err) } dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) if err != nil { - t.Fatal(err) + c.Fatal(err) } return dt } -func setupRegistry(t *testing.T) func() { - testRequires(t, RegistryHosting) - reg, err := newTestRegistryV2(t) +func setupRegistry(c *check.C) *testRegistryV2 { + testRequires(c, RegistryHosting) + reg, err := newTestRegistryV2(c) if 
err != nil { - t.Fatal(err) + c.Fatal(err) } // Wait for registry to be ready to serve requests. @@ -1074,10 +1144,9 @@ func setupRegistry(t *testing.T) func() { } if err != nil { - t.Fatal("Timeout waiting for test registry to become available") + c.Fatal("Timeout waiting for test registry to become available") } - - return func() { reg.Close() } + return reg } // appendBaseEnv appends the minimum set of environment variables to exec the diff --git a/integration/fixtures/https/ca.pem b/integration-cli/fixtures/https/ca.pem similarity index 100% rename from integration/fixtures/https/ca.pem rename to integration-cli/fixtures/https/ca.pem diff --git a/integration/fixtures/https/client-cert.pem b/integration-cli/fixtures/https/client-cert.pem similarity index 100% rename from integration/fixtures/https/client-cert.pem rename to integration-cli/fixtures/https/client-cert.pem diff --git a/integration/fixtures/https/client-key.pem b/integration-cli/fixtures/https/client-key.pem similarity index 100% rename from integration/fixtures/https/client-key.pem rename to integration-cli/fixtures/https/client-key.pem diff --git a/integration/fixtures/https/client-rogue-cert.pem b/integration-cli/fixtures/https/client-rogue-cert.pem similarity index 100% rename from integration/fixtures/https/client-rogue-cert.pem rename to integration-cli/fixtures/https/client-rogue-cert.pem diff --git a/integration/fixtures/https/client-rogue-key.pem b/integration-cli/fixtures/https/client-rogue-key.pem similarity index 100% rename from integration/fixtures/https/client-rogue-key.pem rename to integration-cli/fixtures/https/client-rogue-key.pem diff --git a/integration/fixtures/https/server-cert.pem b/integration-cli/fixtures/https/server-cert.pem similarity index 100% rename from integration/fixtures/https/server-cert.pem rename to integration-cli/fixtures/https/server-cert.pem diff --git a/integration/fixtures/https/server-key.pem b/integration-cli/fixtures/https/server-key.pem similarity 
index 100% rename from integration/fixtures/https/server-key.pem rename to integration-cli/fixtures/https/server-key.pem diff --git a/integration/fixtures/https/server-rogue-cert.pem b/integration-cli/fixtures/https/server-rogue-cert.pem similarity index 100% rename from integration/fixtures/https/server-rogue-cert.pem rename to integration-cli/fixtures/https/server-rogue-cert.pem diff --git a/integration/fixtures/https/server-rogue-key.pem b/integration-cli/fixtures/https/server-rogue-key.pem similarity index 100% rename from integration/fixtures/https/server-rogue-key.pem rename to integration-cli/fixtures/https/server-rogue-key.pem diff --git a/integration-cli/registry.go b/integration-cli/registry.go index 8290e710fd631..2801eacb5f514 100644 --- a/integration-cli/registry.go +++ b/integration-cli/registry.go @@ -7,7 +7,8 @@ import ( "os" "os/exec" "path/filepath" - "testing" + + "github.com/go-check/check" ) const v2binary = "registry-v2" @@ -17,7 +18,7 @@ type testRegistryV2 struct { dir string } -func newTestRegistryV2(t *testing.T) (*testRegistryV2, error) { +func newTestRegistryV2(c *check.C) (*testRegistryV2, error) { template := `version: 0.1 loglevel: debug storage: @@ -43,7 +44,7 @@ http: if err := cmd.Start(); err != nil { os.RemoveAll(tmp) if os.IsNotExist(err) { - t.Skip() + c.Skip(err.Error()) } return nil, err } diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go index cdd9991873947..cc451bd886481 100644 --- a/integration-cli/requirements.go +++ b/integration-cli/requirements.go @@ -7,7 +7,8 @@ import ( "net/http" "os/exec" "strings" - "testing" + + "github.com/go-check/check" ) type TestCondition func() bool @@ -57,8 +58,8 @@ var ( func() bool { if daemonExecDriver == "" { // get daemon info - body, err := sockRequest("GET", "/info", nil) - if err != nil { + status, body, err := sockRequest("GET", "/info", nil) + if err != nil || status != http.StatusOK { log.Fatalf("sockRequest failed for /info: %v", err) } @@ -92,10 
+93,10 @@ var ( // testRequires checks if the environment satisfies the requirements // for the test to run or skips the tests. -func testRequires(t *testing.T, requirements ...TestRequirement) { +func testRequires(c *check.C, requirements ...TestRequirement) { for _, r := range requirements { if !r.Condition() { - t.Skip(r.SkipMessage) + c.Skip(r.SkipMessage) } } } diff --git a/integration-cli/utils.go b/integration-cli/utils.go index 536f6984e22ab..f0de79ea8f071 100644 --- a/integration-cli/utils.go +++ b/integration-cli/utils.go @@ -143,6 +143,7 @@ func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode in if i > 0 { prevCmd := cmds[i-1] cmd.Stdin, err = prevCmd.StdoutPipe() + if err != nil { return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) } @@ -167,13 +168,8 @@ func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode in return runCommandWithOutput(cmds[len(cmds)-1]) } -func logDone(message string) { - fmt.Printf("[PASSED]: %.69s\n", message) -} - func unmarshalJSON(data []byte, result interface{}) error { - err := json.Unmarshal(data, result) - if err != nil { + if err := json.Unmarshal(data, result); err != nil { return err } @@ -213,7 +209,16 @@ func waitInspect(name, expr, expected string, timeout int) error { cmd := exec.Command(dockerBinary, "inspect", "-f", expr, name) out, _, err := runCommandWithOutput(cmd) if err != nil { - return fmt.Errorf("error executing docker inspect: %v", err) + if !strings.Contains(out, "No such") { + return fmt.Errorf("error executing docker inspect: %v\n%s", err, out) + } + select { + case <-after: + return err + default: + time.Sleep(10 * time.Millisecond) + continue + } } out = strings.TrimSpace(out) diff --git a/integration/api_test.go b/integration/api_test.go index 98e683d0040ac..e45fa97e8288e 100644 --- a/integration/api_test.go +++ b/integration/api_test.go @@ -4,337 +4,22 @@ import ( "bufio" "bytes" "encoding/json" - "fmt" "io" "io/ioutil" "net" 
"net/http" "net/http/httptest" - "strings" "testing" "time" "github.com/docker/docker/api" "github.com/docker/docker/api/server" "github.com/docker/docker/api/types" - "github.com/docker/docker/builder" "github.com/docker/docker/engine" "github.com/docker/docker/runconfig" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) -func TestSaveImageAndThenLoad(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // save image - r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } - tarball := r.Body - - // delete the image - r = httptest.NewRecorder() - req, err = http.NewRequest("DELETE", "/images/"+unitTestImageID, nil) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } - - // make sure there is no image - r = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusNotFound { - t.Fatalf("%d NotFound expected, received %d\n", http.StatusNotFound, r.Code) - } - - // load the image - r = httptest.NewRecorder() - req, err = http.NewRequest("POST", "/images/load", tarball) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } - - // finally make sure the image is there - r = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/images/"+unitTestImageID+"/get", nil) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, 
r, req) - if r.Code != http.StatusOK { - t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) - } -} - -func TestGetContainersTop(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"/bin/sh", "-c", "cat"}, - OpenStdin: true, - }, - t, - ) - defer func() { - // Make sure the process dies before destroying daemon - containerKill(eng, containerID, t) - containerWait(eng, containerID, t) - }() - - startContainer(eng, containerID, t) - - setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { - for { - if containerRunning(eng, containerID, t) { - break - } - time.Sleep(10 * time.Millisecond) - } - }) - - if !containerRunning(eng, containerID, t) { - t.Fatalf("Container should be running") - } - - // Make sure sh spawn up cat - setTimeout(t, "read/write assertion timed out", 2*time.Second, func() { - in, out := containerAttach(eng, containerID, t) - if err := assertPipe("hello\n", "hello", out, in, 150); err != nil { - t.Fatal(err) - } - }) - - r := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/containers/"+containerID+"/top?ps_args=aux", nil) - if err != nil { - t.Fatal(err) - } - server.ServeRequest(eng, api.APIVERSION, r, req) - assertHttpNotError(r, t) - var procs engine.Env - if err := procs.Decode(r.Body); err != nil { - t.Fatal(err) - } - - if len(procs.GetList("Titles")) != 11 { - t.Fatalf("Expected 11 titles, found %d.", len(procs.GetList("Titles"))) - } - if procs.GetList("Titles")[0] != "USER" || procs.GetList("Titles")[10] != "COMMAND" { - t.Fatalf("Expected Titles[0] to be USER and Titles[10] to be COMMAND, found %s and %s.", procs.GetList("Titles")[0], procs.GetList("Titles")[10]) - } - processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - t.Fatal(err) - } - if len(processes) != 2 { - t.Fatalf("Expected 2 processes, 
found %d.", len(processes)) - } - if processes[0][10] != "/bin/sh -c cat" { - t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[0][10]) - } - if processes[1][10] != "/bin/sh -c cat" { - t.Fatalf("Expected `/bin/sh -c cat`, found %s.", processes[1][10]) - } -} - -func TestPostCommit(t *testing.T) { - eng := NewTestEngine(t) - b := &builder.BuilderJob{Engine: eng} - b.Install() - defer mkDaemonFromEngine(eng, t).Nuke() - - // Create a container and remove a file - containerID := createTestContainer(eng, - &runconfig.Config{ - Image: unitTestImageID, - Cmd: []string{"touch", "/test"}, - }, - t, - ) - - containerRun(eng, containerID, t) - - req, err := http.NewRequest("POST", "/commit?repo=testrepo&testtag=tag&container="+containerID, bytes.NewReader([]byte{})) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - server.ServeRequest(eng, api.APIVERSION, r, req) - assertHttpNotError(r, t) - if r.Code != http.StatusCreated { - t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) - } - - var env engine.Env - if err := env.Decode(r.Body); err != nil { - t.Fatal(err) - } - if err := eng.Job("image_inspect", env.Get("Id")).Run(); err != nil { - t.Fatalf("The image has not been committed") - } -} - -func TestPostContainersCreate(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - configJSON, err := json.Marshal(&runconfig.Config{ - Image: unitTestImageID, - Memory: 33554432, - Cmd: []string{"touch", "/test"}, - }) - if err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Content-Type", "application/json") - - r := httptest.NewRecorder() - server.ServeRequest(eng, api.APIVERSION, r, req) - assertHttpNotError(r, t) - if r.Code != http.StatusCreated { - t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) - } - - var apiRun engine.Env - if err := 
apiRun.Decode(r.Body); err != nil { - t.Fatal(err) - } - containerID := apiRun.Get("Id") - - containerAssertExists(eng, containerID, t) - containerRun(eng, containerID, t) - - if !containerFileExists(eng, containerID, "test", t) { - t.Fatal("Test file was not created") - } -} - -func TestPostJsonVerify(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - configJSON, err := json.Marshal(&runconfig.Config{ - Image: unitTestImageID, - Memory: 33554432, - Cmd: []string{"touch", "/test"}, - }) - if err != nil { - t.Fatal(err) - } - - req, err := http.NewRequest("POST", "/containers/create", bytes.NewReader(configJSON)) - if err != nil { - t.Fatal(err) - } - - r := httptest.NewRecorder() - - server.ServeRequest(eng, api.APIVERSION, r, req) - - // Don't add Content-Type header - // req.Header.Set("Content-Type", "application/json") - - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { - t.Fatal("Create should have failed due to no Content-Type header - got:", r) - } - - // Now add header but with wrong type and retest - req.Header.Set("Content-Type", "application/xml") - - server.ServeRequest(eng, api.APIVERSION, r, req) - if r.Code != http.StatusInternalServerError || !strings.Contains(((*r.Body).String()), "application/json") { - t.Fatal("Create should have failed due to wrong Content-Type header - got:", r) - } -} - -// Issue 7941 - test to make sure a "null" in JSON is just ignored. 
-// W/o this fix a null in JSON would be parsed into a string var as "null" -func TestPostCreateNull(t *testing.T) { - eng := NewTestEngine(t) - daemon := mkDaemonFromEngine(eng, t) - defer daemon.Nuke() - - configStr := fmt.Sprintf(`{ - "Hostname":"", - "Domainname":"", - "Memory":0, - "MemorySwap":0, - "CpuShares":0, - "Cpuset":null, - "AttachStdin":true, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "ExposedPorts":{}, - "Tty":true, - "OpenStdin":true, - "StdinOnce":true, - "Env":[], - "Cmd":"ls", - "Image":"%s", - "Volumes":{}, - "WorkingDir":"", - "Entrypoint":null, - "NetworkDisabled":false, - "OnBuild":null}`, unitTestImageID) - - req, err := http.NewRequest("POST", "/containers/create", strings.NewReader(configStr)) - if err != nil { - t.Fatal(err) - } - - req.Header.Set("Content-Type", "application/json") - - r := httptest.NewRecorder() - server.ServeRequest(eng, api.APIVERSION, r, req) - assertHttpNotError(r, t) - if r.Code != http.StatusCreated { - t.Fatalf("%d Created expected, received %d\n", http.StatusCreated, r.Code) - } - - var apiRun engine.Env - if err := apiRun.Decode(r.Body); err != nil { - t.Fatal(err) - } - containerID := apiRun.Get("Id") - - containerAssertExists(eng, containerID, t) - - c, _ := daemon.Get(containerID) - if c.Config.Cpuset != "" { - t.Fatalf("Cpuset should have been empty - instead its:" + c.Config.Cpuset) - } -} - func TestPostContainersKill(t *testing.T) { eng := NewTestEngine(t) defer mkDaemonFromEngine(eng, t).Nuke() @@ -342,7 +27,7 @@ func TestPostContainersKill(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/cat"}, + Cmd: runconfig.NewCommand("/bin/cat"), OpenStdin: true, }, t, @@ -379,7 +64,7 @@ func TestPostContainersRestart(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/top"}, + Cmd: runconfig.NewCommand("/bin/top"), OpenStdin: true, }, t, @@ -423,7 
+108,7 @@ func TestPostContainersStart(t *testing.T) { eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/cat"}, + Cmd: runconfig.NewCommand("/bin/cat"), OpenStdin: true, }, t, @@ -473,7 +158,7 @@ func TestPostContainersStop(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/top"}, + Cmd: runconfig.NewCommand("/bin/top"), OpenStdin: true, }, t, @@ -525,7 +210,7 @@ func TestPostContainersWait(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/sleep", "1"}, + Cmd: runconfig.NewCommand("/bin/sleep", "1"), OpenStdin: true, }, t, @@ -561,7 +246,7 @@ func TestPostContainersAttach(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/cat"}, + Cmd: runconfig.NewCommand("/bin/cat"), OpenStdin: true, }, t, @@ -637,7 +322,7 @@ func TestPostContainersAttachStderr(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"/bin/sh", "-c", "/bin/cat >&2"}, + Cmd: runconfig.NewCommand("/bin/sh", "-c", "/bin/cat >&2"), OpenStdin: true, }, t, @@ -749,7 +434,7 @@ func TestGetEnabledCors(t *testing.T) { t.Errorf("Expected header Access-Control-Allow-Headers to be \"Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth\", %s found.", allowHeaders) } if allowMethods != "GET, POST, DELETE, PUT, OPTIONS" { - t.Errorf("Expected hearder Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) + t.Errorf("Expected header Access-Control-Allow-Methods to be \"GET, POST, DELETE, PUT, OPTIONS\", %s found.", allowMethods) } } @@ -761,7 +446,8 @@ func TestDeleteImages(t *testing.T) { initialImages := getImages(eng, t, true, "") - if err := eng.Job("tag", unitTestImageName, "test", "test").Run(); err != nil { + d := getDaemon(eng) + if err := d.Repositories().Tag("test", 
"test", unitTestImageName, true); err != nil { t.Fatal(err) } @@ -818,7 +504,7 @@ func TestPostContainersCopy(t *testing.T) { containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, - Cmd: []string{"touch", "/test.txt"}, + Cmd: runconfig.NewCommand("touch", "/test.txt"), }, t, ) @@ -932,7 +618,7 @@ func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) { req.Header.Add("Content-Type", "application/json") // This is a cheat to make the http request do chunked encoding // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite - // http://golang.org/src/pkg/net/http/request.go?s=11980:12172 + // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 req.ContentLength = -1 server.ServeRequest(eng, api.APIVERSION, r, req) assertHttpNotError(r, t) @@ -962,7 +648,7 @@ func TestConstainersStartChunkedEncodingHostConfig(t *testing.T) { } if c.HostConfig.Binds[0] != "/tmp:/foo" { - t.Fatal("Chunked encoding not properly handled, execpted binds to be /tmp:/foo, got:", c.HostConfig.Binds[0]) + t.Fatal("Chunked encoding not properly handled, expected binds to be /tmp:/foo, got:", c.HostConfig.Binds[0]) } } diff --git a/integration/commands_test.go b/integration/commands_test.go deleted file mode 100644 index 97a927b8bf3b6..0000000000000 --- a/integration/commands_test.go +++ /dev/null @@ -1,436 +0,0 @@ -package docker - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/daemon" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/term" - "github.com/kr/pty" -) - -func closeWrap(args ...io.Closer) error { - e := false - ret := fmt.Errorf("Error closing elements") - for _, c := range args { - if err := c.Close(); err != nil { - e = true - ret = fmt.Errorf("%s\n%s", ret, err) - } - } - if e { - return ret - } - return nil -} - -func setRaw(t *testing.T, c 
*daemon.Container) *term.State { - pty, err := c.GetPtyMaster() - if err != nil { - t.Fatal(err) - } - state, err := term.MakeRaw(pty.Fd()) - if err != nil { - t.Fatal(err) - } - return state -} - -func unsetRaw(t *testing.T, c *daemon.Container, state *term.State) { - pty, err := c.GetPtyMaster() - if err != nil { - t.Fatal(err) - } - term.RestoreTerminal(pty.Fd(), state) -} - -func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container { - var container *daemon.Container - - setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { - for { - l := globalDaemon.List() - if len(l) == 1 && l[0].IsRunning() { - container = l[0] - break - } - time.Sleep(10 * time.Millisecond) - } - }) - - if container == nil { - t.Fatal("An error occured while waiting for the container to start") - } - - return container -} - -func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { - c := make(chan bool) - - // Make sure we are not too long - go func() { - time.Sleep(d) - c <- true - }() - go func() { - f() - c <- false - }() - if <-c && msg != "" { - t.Fatal(msg) - } -} - -func expectPipe(expected string, r io.Reader) error { - o, err := bufio.NewReader(r).ReadString('\n') - if err != nil { - return err - } - if strings.Trim(o, " \r\n") != expected { - return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) - } - return nil -} - -func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { - for i := 0; i < count; i++ { - if _, err := w.Write([]byte(input)); err != nil { - return err - } - if err := expectPipe(output, r); err != nil { - return err - } - } - return nil -} - -// TestRunDetach checks attaching and detaching with the escape sequence. 
-func TestRunDetach(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - cpty, tty, err := pty.Open() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - ch := make(chan struct{}) - go func() { - defer close(ch) - cli.CmdRun("-i", "-t", unitTestImageID, "cat") - }() - - container := waitContainerStart(t, 10*time.Second) - - state := setRaw(t, container) - defer unsetRaw(t, container, state) - - setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { - t.Fatal(err) - } - }) - - setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - cpty.Write([]byte{16}) - time.Sleep(100 * time.Millisecond) - cpty.Write([]byte{17}) - }) - - // wait for CmdRun to return - setTimeout(t, "Waiting for CmdRun timed out", 15*time.Second, func() { - <-ch - }) - closeWrap(cpty, stdout, stdoutPipe) - - time.Sleep(500 * time.Millisecond) - if !container.IsRunning() { - t.Fatal("The detached container should be still running") - } - - setTimeout(t, "Waiting for container to die timed out", 20*time.Second, func() { - container.Kill() - }) -} - -// TestAttachDetach checks that attach in tty mode can be detached using the long container ID -func TestAttachDetach(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - cpty, tty, err := pty.Open() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - ch := make(chan struct{}) - go func() { - defer close(ch) - if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { - t.Fatal(err) - } - }() - - container := waitContainerStart(t, 10*time.Second) - - setTimeout(t, "Reading container's id timed out", 10*time.Second, func() { - buf := make([]byte, 1024) - n, err := stdout.Read(buf) 
- if err != nil { - t.Fatal(err) - } - - if strings.Trim(string(buf[:n]), " \r\n") != container.ID { - t.Fatalf("Wrong ID received. Expect %s, received %s", container.ID, buf[:n]) - } - }) - setTimeout(t, "Starting container timed out", 10*time.Second, func() { - <-ch - }) - - state := setRaw(t, container) - defer unsetRaw(t, container, state) - - stdout, stdoutPipe = io.Pipe() - cpty, tty, err = pty.Open() - if err != nil { - t.Fatal(err) - } - - cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - - ch = make(chan struct{}) - go func() { - defer close(ch) - if err := cli.CmdAttach(container.ID); err != nil { - if err != io.ErrClosedPipe { - t.Fatal(err) - } - } - }() - - setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { - if err != io.ErrClosedPipe { - t.Fatal(err) - } - } - }) - - setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - cpty.Write([]byte{16}) - time.Sleep(100 * time.Millisecond) - cpty.Write([]byte{17}) - }) - - // wait for CmdRun to return - setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { - <-ch - }) - - closeWrap(cpty, stdout, stdoutPipe) - - time.Sleep(500 * time.Millisecond) - if !container.IsRunning() { - t.Fatal("The detached container should be still running") - } - - setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() { - container.Kill() - }) -} - -// TestAttachDetachTruncatedID checks that attach in tty mode can be detached -func TestAttachDetachTruncatedID(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - cpty, tty, err := pty.Open() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - // Discard the CmdRun output - go stdout.Read(make([]byte, 1024)) - setTimeout(t, "Starting container timed out", 
2*time.Second, func() { - if err := cli.CmdRun("-i", "-t", "-d", unitTestImageID, "cat"); err != nil { - t.Fatal(err) - } - }) - - container := waitContainerStart(t, 10*time.Second) - - state := setRaw(t, container) - defer unsetRaw(t, container, state) - - stdout, stdoutPipe = io.Pipe() - cpty, tty, err = pty.Open() - if err != nil { - t.Fatal(err) - } - - cli = client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - - ch := make(chan struct{}) - go func() { - defer close(ch) - if err := cli.CmdAttach(stringid.TruncateID(container.ID)); err != nil { - if err != io.ErrClosedPipe { - t.Fatal(err) - } - } - }() - - setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { - if err != io.ErrClosedPipe { - t.Fatal(err) - } - } - }) - - setTimeout(t, "Escape sequence timeout", 5*time.Second, func() { - cpty.Write([]byte{16}) - time.Sleep(100 * time.Millisecond) - cpty.Write([]byte{17}) - }) - - // wait for CmdRun to return - setTimeout(t, "Waiting for CmdAttach timed out", 15*time.Second, func() { - <-ch - }) - closeWrap(cpty, stdout, stdoutPipe) - - time.Sleep(500 * time.Millisecond) - if !container.IsRunning() { - t.Fatal("The detached container should be still running") - } - - setTimeout(t, "Waiting for container to die timedout", 5*time.Second, func() { - container.Kill() - }) -} - -// Expected behaviour, the process stays alive when the client disconnects -func TestAttachDisconnect(t *testing.T) { - stdout, stdoutPipe := io.Pipe() - cpty, tty, err := pty.Open() - if err != nil { - t.Fatal(err) - } - - cli := client.NewDockerCli(tty, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - go func() { - // Start a process in daemon mode - if err := cli.CmdRun("-d", "-i", unitTestImageID, "/bin/cat"); err != nil { - logrus.Debugf("Error CmdRun: %s", err) - } - }() - - setTimeout(t, 
"Waiting for CmdRun timed out", 10*time.Second, func() { - if _, err := bufio.NewReader(stdout).ReadString('\n'); err != nil { - t.Fatal(err) - } - }) - - setTimeout(t, "Waiting for the container to be started timed out", 10*time.Second, func() { - for { - l := globalDaemon.List() - if len(l) == 1 && l[0].IsRunning() { - break - } - time.Sleep(10 * time.Millisecond) - } - }) - - container := globalDaemon.List()[0] - - // Attach to it - c1 := make(chan struct{}) - go func() { - // We're simulating a disconnect so the return value doesn't matter. What matters is the - // fact that CmdAttach returns. - cli.CmdAttach(container.ID) - close(c1) - }() - - setTimeout(t, "First read/write assertion timed out", 2*time.Second, func() { - if err := assertPipe("hello\n", "hello", stdout, cpty, 150); err != nil { - t.Fatal(err) - } - }) - // Close pipes (client disconnects) - if err := closeWrap(cpty, stdout, stdoutPipe); err != nil { - t.Fatal(err) - } - - // Wait for attach to finish, the client disconnected, therefore, Attach finished his job - setTimeout(t, "Waiting for CmdAttach timed out", 2*time.Second, func() { - <-c1 - }) - - // We closed stdin, expect /bin/cat to still be running - // Wait a little bit to make sure container.monitor() did his thing - _, err = container.WaitStop(500 * time.Millisecond) - if err == nil || !container.IsRunning() { - t.Fatalf("/bin/cat is not running after closing stdin") - } - - // Try to avoid the timeout in destroy. Best effort, don't check error - cStdin := container.StdinPipe() - cStdin.Close() - container.WaitStop(-1 * time.Second) -} - -// Expected behaviour: container gets deleted automatically after exit -func TestRunAutoRemove(t *testing.T) { - t.Skip("Fixme. 
Skipping test for now, race condition") - stdout, stdoutPipe := io.Pipe() - - cli := client.NewDockerCli(nil, stdoutPipe, ioutil.Discard, "", testDaemonProto, testDaemonAddr, nil) - defer cleanup(globalEngine, t) - - c := make(chan struct{}) - go func() { - defer close(c) - if err := cli.CmdRun("--rm", unitTestImageID, "hostname"); err != nil { - t.Fatal(err) - } - }() - - var temporaryContainerID string - setTimeout(t, "Reading command output time out", 2*time.Second, func() { - cmdOutput, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - t.Fatal(err) - } - temporaryContainerID = cmdOutput - if err := closeWrap(stdout, stdoutPipe); err != nil { - t.Fatal(err) - } - }) - - setTimeout(t, "CmdRun timed out", 10*time.Second, func() { - <-c - }) - - time.Sleep(500 * time.Millisecond) - - if len(globalDaemon.List()) > 0 { - t.Fatalf("failed to remove container automatically: container %s still exists", temporaryContainerID) - } -} diff --git a/integration/container_test.go b/integration/container_test.go index b6cbfd096103a..9256e9997f2a4 100644 --- a/integration/container_test.go +++ b/integration/container_test.go @@ -14,7 +14,7 @@ func TestRestartStdin(t *testing.T) { defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"cat"}, + Cmd: runconfig.NewCommand("cat"), OpenStdin: true, }, @@ -79,7 +79,7 @@ func TestStdin(t *testing.T) { defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"cat"}, + Cmd: runconfig.NewCommand("cat"), OpenStdin: true, }, @@ -119,7 +119,7 @@ func TestTty(t *testing.T) { defer nuke(daemon) container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"cat"}, + Cmd: runconfig.NewCommand("cat"), OpenStdin: true, }, @@ -160,7 +160,7 @@ func BenchmarkRunSequential(b *testing.B) { for i := 0; i < b.N; i++ { container, _, err := 
daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"echo", "-n", "foo"}, + Cmd: runconfig.NewCommand("echo", "-n", "foo"), }, &runconfig.HostConfig{}, "", @@ -194,7 +194,7 @@ func BenchmarkRunParallel(b *testing.B) { go func(i int, complete chan error) { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"echo", "-n", "foo"}, + Cmd: runconfig.NewCommand("echo", "-n", "foo"), }, &runconfig.HostConfig{}, "", @@ -213,7 +213,7 @@ func BenchmarkRunParallel(b *testing.B) { return } // if string(output) != "foo" { - // complete <- fmt.Errorf("Unexecpted output: %v", string(output)) + // complete <- fmt.Errorf("Unexpected output: %v", string(output)) // } if err := daemon.Rm(container); err != nil { complete <- err diff --git a/integration/https_test.go b/integration/https_test.go deleted file mode 100644 index 17d69345a9529..0000000000000 --- a/integration/https_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package docker - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "strings" - "testing" - "time" - - "github.com/docker/docker/api/client" -) - -const ( - errBadCertificate = "remote error: bad certificate" - errCaUnknown = "x509: certificate signed by unknown authority" -) - -func getTlsConfig(certFile, keyFile string, t *testing.T) *tls.Config { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile("fixtures/https/ca.pem") - if err != nil { - t.Fatal(err) - } - certPool.AppendCertsFromPEM(file) - - cert, err := tls.LoadX509KeyPair("fixtures/https/"+certFile, "fixtures/https/"+keyFile) - if err != nil { - t.Fatalf("Couldn't load X509 key pair: %s", err) - } - tlsConfig := &tls.Config{ - RootCAs: certPool, - Certificates: []tls.Certificate{cert}, - } - return tlsConfig -} - -// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint -func TestHttpsInfo(t *testing.T) { - cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, - 
testDaemonHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) - - setTimeout(t, "Reading command output time out", 10*time.Second, func() { - if err := cli.CmdInfo(); err != nil { - t.Fatal(err) - } - }) -} - -// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint -// by using a rogue client certificate and checks that it fails with the expected error. -func TestHttpsInfoRogueCert(t *testing.T) { - cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, - testDaemonHttpsAddr, getTlsConfig("client-rogue-cert.pem", "client-rogue-key.pem", t)) - - setTimeout(t, "Reading command output time out", 10*time.Second, func() { - err := cli.CmdInfo() - if err == nil { - t.Fatal("Expected error but got nil") - } - if !strings.Contains(err.Error(), errBadCertificate) { - t.Fatalf("Expected error: %s, got instead: %s", errBadCertificate, err) - } - }) -} - -// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint -// which provides a rogue server certificate and checks that it fails with the expected error -func TestHttpsInfoRogueServerCert(t *testing.T) { - cli := client.NewDockerCli(nil, ioutil.Discard, ioutil.Discard, "", testDaemonProto, - testDaemonRogueHttpsAddr, getTlsConfig("client-cert.pem", "client-key.pem", t)) - - setTimeout(t, "Reading command output time out", 10*time.Second, func() { - err := cli.CmdInfo() - if err == nil { - t.Fatal("Expected error but got nil") - } - - if !strings.Contains(err.Error(), errCaUnknown) { - t.Fatalf("Expected error: %s, got instead: %s", errCaUnknown, err) - } - - }) -} diff --git a/integration/runtime_test.go b/integration/runtime_test.go index b5e404d59dfe2..a2f22072c36b8 100644 --- a/integration/runtime_test.go +++ b/integration/runtime_test.go @@ -17,12 +17,15 @@ import ( "time" "github.com/Sirupsen/logrus" + apiserver "github.com/docker/docker/api/server" + "github.com/docker/docker/cliconfig" 
"github.com/docker/docker/daemon" "github.com/docker/docker/daemon/execdriver" "github.com/docker/docker/engine" "github.com/docker/docker/graph" "github.com/docker/docker/image" "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/stringid" @@ -45,9 +48,7 @@ const ( ) var ( - // FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted. globalDaemon *daemon.Daemon - globalEngine *engine.Engine globalHttpsEngine *engine.Engine globalRogueHttpsEngine *engine.Engine startFds int @@ -119,21 +120,28 @@ func init() { // Create the "global daemon" with a long-running daemons for integration tests spawnGlobalDaemon() - spawnLegitHttpsDaemon() - spawnRogueHttpsDaemon() - startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() + startFds, startGoroutines = fileutils.GetTotalUsedFds(), runtime.NumGoroutine() } func setupBaseImage() { eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase) - job := eng.Job("image_inspect", unitTestImageName) - img, _ := job.Stdout.AddEnv() + d := getDaemon(eng) + + _, err := d.Repositories().Lookup(unitTestImageName) // If the unit test is not found, try to download it. - if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID { + if err != nil { + // seems like we can just ignore the error here... + // there was a check of imgId from job stdout against unittestid but + // if there was an error how could the imgid from the job + // be compared?! it's obvious it's different, am I totally wrong? 
+ // Retrieve the Image - job = eng.Job("pull", unitTestImageName) - job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout)) - if err := job.Run(); err != nil { + imagePullConfig := &graph.ImagePullConfig{ + Parallel: true, + OutStream: ioutils.NopWriteCloser(os.Stdout), + AuthConfig: &cliconfig.AuthConfig{}, + } + if err := d.Repositories().Pull(unitTestImageName, "", imagePullConfig); err != nil { logrus.Fatalf("Unable to pull the test image: %s", err) } } @@ -146,9 +154,10 @@ func spawnGlobalDaemon() { } t := std_log.New(os.Stderr, "", 0) eng := NewTestEngine(t) - globalEngine = eng globalDaemon = mkDaemonFromEngine(eng, t) + serverConfig := &apiserver.ServerConfig{Logging: true} + api := apiserver.New(serverConfig, eng) // Spawn a Daemon go func() { logrus.Debugf("Spawning global daemon for integration tests") @@ -156,75 +165,17 @@ func spawnGlobalDaemon() { Scheme: testDaemonProto, Host: testDaemonAddr, } - job := eng.Job("serveapi", listenURL.String()) - job.SetenvBool("Logging", true) - if err := job.Run(); err != nil { - logrus.Fatalf("Unable to spawn the test daemon: %s", err) - } - }() - - // Give some time to ListenAndServer to actually start - // FIXME: use inmem transports instead of tcp - time.Sleep(time.Second) - - if err := eng.Job("acceptconnections").Run(); err != nil { - logrus.Fatalf("Unable to accept connections for test api: %s", err) - } -} - -func spawnLegitHttpsDaemon() { - if globalHttpsEngine != nil { - return - } - globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem", - "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem") -} - -func spawnRogueHttpsDaemon() { - if globalRogueHttpsEngine != nil { - return - } - globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem", - "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem") -} -func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine { - t := std_log.New(os.Stderr, "", 0) - root, err 
:= newTestDirectory(unitTestStoreBase) - if err != nil { - t.Fatal(err) - } - // FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false, - // and we want to set it to true. - - eng := newTestEngine(t, true, root) - - // Spawn a Daemon - go func() { - logrus.Debugf("Spawning https daemon for integration tests") - listenURL := &url.URL{ - Scheme: testDaemonHttpsProto, - Host: addr, - } - job := eng.Job("serveapi", listenURL.String()) - job.SetenvBool("Logging", true) - job.SetenvBool("Tls", true) - job.SetenvBool("TlsVerify", true) - job.Setenv("TlsCa", cacert) - job.Setenv("TlsCert", cert) - job.Setenv("TlsKey", key) - if err := job.Run(); err != nil { + if err := api.ServeApi([]string{listenURL.String()}); err != nil { logrus.Fatalf("Unable to spawn the test daemon: %s", err) } }() // Give some time to ListenAndServer to actually start + // FIXME: use inmem transports instead of tcp time.Sleep(time.Second) - if err := eng.Job("acceptconnections").Run(); err != nil { - logrus.Fatalf("Unable to accept connections for test api: %s", err) - } - return eng + api.AcceptConnections(getDaemon(eng)) } // FIXME: test that ImagePull(json=true) send correct json output @@ -254,7 +205,7 @@ func TestDaemonCreate(t *testing.T) { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"ls", "-al"}, + Cmd: runconfig.NewCommand("ls", "-al"), }, &runconfig.HostConfig{}, "", @@ -295,15 +246,16 @@ func TestDaemonCreate(t *testing.T) { } // Test that conflict error displays correct details + cmd := runconfig.NewCommand("ls", "-al") testContainer, _, _ := daemon.Create( &runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"ls", "-al"}, + Cmd: cmd, }, &runconfig.HostConfig{}, "conflictname", ) - if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || 
!strings.Contains(err.Error(), stringid.TruncateID(testContainer.ID)) { + if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: cmd}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), stringid.TruncateID(testContainer.ID)) { t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err) } @@ -315,7 +267,7 @@ func TestDaemonCreate(t *testing.T) { if _, _, err := daemon.Create( &runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{}, + Cmd: runconfig.NewCommand(), }, &runconfig.HostConfig{}, "", @@ -325,7 +277,7 @@ func TestDaemonCreate(t *testing.T) { config := &runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"/bin/ls"}, + Cmd: runconfig.NewCommand("/bin/ls"), PortSpecs: []string{"80"}, } container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") @@ -338,7 +290,7 @@ func TestDaemonCreate(t *testing.T) { // test expose 80:8000 container, warnings, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"ls", "-al"}, + Cmd: runconfig.NewCommand("ls", "-al"), PortSpecs: []string{"80:8000"}, }, &runconfig.HostConfig{}, @@ -358,7 +310,7 @@ func TestDestroy(t *testing.T) { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"ls", "-al"}, + Cmd: runconfig.NewCommand("ls", "-al"), }, &runconfig.HostConfig{}, "") @@ -421,14 +373,13 @@ func TestGet(t *testing.T) { func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { var ( - err error - id string - outputBuffer = bytes.NewBuffer(nil) - strPort string - eng = NewTestEngine(t) - daemon = mkDaemonFromEngine(eng, t) - port = 5554 - p nat.Port + err error + id string + strPort string + eng = NewTestEngine(t) + daemon = mkDaemonFromEngine(eng, t) + port = 5554 + p nat.Port ) defer func() { if err != nil { @@ -451,16 +402,14 @@ func startEchoServerContainer(t 
*testing.T, proto string) (*daemon.Daemon, *daem p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) ep[p] = struct{}{} - jobCreate := eng.Job("create") - jobCreate.Setenv("Image", unitTestImageID) - jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) - jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) - jobCreate.SetenvJson("ExposedPorts", ep) - jobCreate.Stdout.Add(outputBuffer) - if err := jobCreate.Run(); err != nil { - t.Fatal(err) + c := &runconfig.Config{ + Image: unitTestImageID, + Cmd: runconfig.NewCommand("sh", "-c", cmd), + PortSpecs: []string{fmt.Sprintf("%s/%s", strPort, proto)}, + ExposedPorts: ep, } - id = engine.Tail(outputBuffer, 1) + + id, _, err = daemon.ContainerCreate(unitTestImageID, c, &runconfig.HostConfig{}) // FIXME: this relies on the undocumented behavior of daemon.Create // which will return a nil error AND container if the exposed ports // are invalid. That behavior should be fixed! @@ -471,15 +420,7 @@ func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daem } - jobStart := eng.Job("start", id) - portBindings := make(map[nat.Port][]nat.PortBinding) - portBindings[p] = []nat.PortBinding{ - {}, - } - if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { - t.Fatal(err) - } - if err := jobStart.Run(); err != nil { + if err := daemon.ContainerStart(id, &runconfig.HostConfig{}); err != nil { t.Fatal(err) } @@ -730,20 +671,15 @@ func TestContainerNameValidation(t *testing.T) { t.Fatal(err) } - var outputBuffer = bytes.NewBuffer(nil) - job := eng.Job("create", test.Name) - if err := job.ImportEnv(config); err != nil { - t.Fatal(err) - } - job.Stdout.Add(outputBuffer) - if err := job.Run(); err != nil { + containerId, _, err := daemon.ContainerCreate(test.Name, config, &runconfig.HostConfig{}) + if err != nil { if !test.Valid { continue } t.Fatal(err) } - container, err := daemon.Get(engine.Tail(outputBuffer, 1)) + container, err := daemon.Get(containerId) if err != 
nil { t.Fatal(err) } @@ -758,7 +694,6 @@ func TestContainerNameValidation(t *testing.T) { t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) } } - } func TestLinkChildContainer(t *testing.T) { @@ -875,7 +810,7 @@ func TestDestroyWithInitLayer(t *testing.T) { container, _, err := daemon.Create(&runconfig.Config{ Image: GetTestImage(daemon).ID, - Cmd: []string{"ls", "-al"}, + Cmd: runconfig.NewCommand("ls", "-al"), }, &runconfig.HostConfig{}, "") @@ -902,7 +837,7 @@ func TestDestroyWithInitLayer(t *testing.T) { // Make sure that the container does not exist in the driver if _, err := driver.Get(container.ID, ""); err == nil { - t.Fatal("Conttainer should not exist in the driver") + t.Fatal("Container should not exist in the driver") } // Make sure that the init layer is removed from the driver diff --git a/integration/server_test.go b/integration/server_test.go deleted file mode 100644 index b2c4dd80a4004..0000000000000 --- a/integration/server_test.go +++ /dev/null @@ -1,214 +0,0 @@ -package docker - -import ( - "bytes" - "testing" - "time" - - "github.com/docker/docker/builder" - "github.com/docker/docker/daemon" - "github.com/docker/docker/engine" -) - -func TestCreateNumberHostname(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - config, _, _, err := parseRun([]string{"-h", "web.0", unitTestImageID, "echo test"}) - if err != nil { - t.Fatal(err) - } - - createTestContainer(eng, config, t) -} - -func TestCommit(t *testing.T) { - eng := NewTestEngine(t) - b := &builder.BuilderJob{Engine: eng} - b.Install() - defer mkDaemonFromEngine(eng, t).Nuke() - - config, _, _, err := parseRun([]string{unitTestImageID, "/bin/cat"}) - if err != nil { - t.Fatal(err) - } - - id := createTestContainer(eng, config, t) - - job := eng.Job("commit", id) - job.Setenv("repo", "testrepo") - job.Setenv("tag", "testtag") - job.SetenvJson("config", config) - if err := job.Run(); err != nil { - t.Fatal(err) - } -} - -func 
TestMergeConfigOnCommit(t *testing.T) { - eng := NewTestEngine(t) - b := &builder.BuilderJob{Engine: eng} - b.Install() - runtime := mkDaemonFromEngine(eng, t) - defer runtime.Nuke() - - container1, _, _ := mkContainer(runtime, []string{"-e", "FOO=bar", unitTestImageID, "echo test > /tmp/foo"}, t) - defer runtime.Rm(container1) - - config, _, _, err := parseRun([]string{container1.ID, "cat /tmp/foo"}) - if err != nil { - t.Error(err) - } - - job := eng.Job("commit", container1.ID) - job.Setenv("repo", "testrepo") - job.Setenv("tag", "testtag") - job.SetenvJson("config", config) - var outputBuffer = bytes.NewBuffer(nil) - job.Stdout.Add(outputBuffer) - if err := job.Run(); err != nil { - t.Error(err) - } - - container2, _, _ := mkContainer(runtime, []string{engine.Tail(outputBuffer, 1)}, t) - defer runtime.Rm(container2) - - job = eng.Job("container_inspect", container1.Name) - baseContainer, _ := job.Stdout.AddEnv() - if err := job.Run(); err != nil { - t.Error(err) - } - - job = eng.Job("container_inspect", container2.Name) - commitContainer, _ := job.Stdout.AddEnv() - if err := job.Run(); err != nil { - t.Error(err) - } - - baseConfig := baseContainer.GetSubEnv("Config") - commitConfig := commitContainer.GetSubEnv("Config") - - if commitConfig.Get("Env") != baseConfig.Get("Env") { - t.Fatalf("Env config in committed container should be %v, was %v", - baseConfig.Get("Env"), commitConfig.Get("Env")) - } - - if baseConfig.Get("Cmd") != "[\"echo test \\u003e /tmp/foo\"]" { - t.Fatalf("Cmd in base container should be [\"echo test \\u003e /tmp/foo\"], was %s", - baseConfig.Get("Cmd")) - } - - if commitConfig.Get("Cmd") != "[\"cat /tmp/foo\"]" { - t.Fatalf("Cmd in committed container should be [\"cat /tmp/foo\"], was %s", - commitConfig.Get("Cmd")) - } -} - -func TestRestartKillWait(t *testing.T) { - eng := NewTestEngine(t) - runtime := mkDaemonFromEngine(eng, t) - defer runtime.Nuke() - - config, hostConfig, _, err := parseRun([]string{"-i", unitTestImageID, 
"/bin/cat"}) - if err != nil { - t.Fatal(err) - } - - id := createTestContainer(eng, config, t) - - containers, err := runtime.Containers(&daemon.ContainersConfig{All: true}) - - if err != nil { - t.Errorf("Error getting containers1: %q", err) - } - - if len(containers) != 1 { - t.Errorf("Expected 1 container, %v found", len(containers)) - } - - job := eng.Job("start", id) - if err := job.ImportEnv(hostConfig); err != nil { - t.Fatal(err) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - job = eng.Job("kill", id) - if err := job.Run(); err != nil { - t.Fatal(err) - } - - eng = newTestEngine(t, false, runtime.Config().Root) - runtime = mkDaemonFromEngine(eng, t) - - containers, err = runtime.Containers(&daemon.ContainersConfig{All: true}) - - if err != nil { - t.Errorf("Error getting containers1: %q", err) - } - if len(containers) != 1 { - t.Errorf("Expected 1 container, %v found", len(containers)) - } - - setTimeout(t, "Waiting on stopped container timedout", 5*time.Second, func() { - job = eng.Job("wait", containers[0].ID) - if err := job.Run(); err != nil { - t.Fatal(err) - } - }) -} - -func TestRunWithTooLowMemoryLimit(t *testing.T) { - eng := NewTestEngine(t) - defer mkDaemonFromEngine(eng, t).Nuke() - - // Try to create a container with a memory limit of 1 byte less than the minimum allowed limit. - job := eng.Job("create") - job.Setenv("Image", unitTestImageID) - job.Setenv("Memory", "524287") - job.Setenv("CpuShares", "1000") - job.SetenvList("Cmd", []string{"/bin/cat"}) - if err := job.Run(); err == nil { - t.Errorf("Memory limit is smaller than the allowed limit. 
Container creation should've failed!") - } -} - -func TestImagesFilter(t *testing.T) { - eng := NewTestEngine(t) - defer nuke(mkDaemonFromEngine(eng, t)) - - if err := eng.Job("tag", unitTestImageName, "utest", "tag1").Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", unitTestImageName, "utest/docker", "tag2").Run(); err != nil { - t.Fatal(err) - } - - if err := eng.Job("tag", unitTestImageName, "utest:5000/docker", "tag3").Run(); err != nil { - t.Fatal(err) - } - - images := getImages(eng, t, false, "utest*/*") - - if len(images[0].RepoTags) != 2 { - t.Fatal("incorrect number of matches returned") - } - - images = getImages(eng, t, false, "utest") - - if len(images[0].RepoTags) != 1 { - t.Fatal("incorrect number of matches returned") - } - - images = getImages(eng, t, false, "utest*") - - if len(images[0].RepoTags) != 1 { - t.Fatal("incorrect number of matches returned") - } - - images = getImages(eng, t, false, "*5000*/*") - - if len(images[0].RepoTags) != 1 { - t.Fatal("incorrect number of matches returned") - } -} diff --git a/integration/utils.go b/integration/utils.go new file mode 100644 index 0000000000000..62e02e9bb141f --- /dev/null +++ b/integration/utils.go @@ -0,0 +1,88 @@ +package docker + +import ( + "bufio" + "fmt" + "io" + "strings" + "testing" + "time" + + "github.com/docker/docker/daemon" +) + +func closeWrap(args ...io.Closer) error { + e := false + ret := fmt.Errorf("Error closing elements") + for _, c := range args { + if err := c.Close(); err != nil { + e = true + ret = fmt.Errorf("%s\n%s", ret, err) + } + } + if e { + return ret + } + return nil +} + +func waitContainerStart(t *testing.T, timeout time.Duration) *daemon.Container { + var container *daemon.Container + + setTimeout(t, "Waiting for the container to be started timed out", timeout, func() { + for { + l := globalDaemon.List() + if len(l) == 1 && l[0].IsRunning() { + container = l[0] + break + } + time.Sleep(10 * time.Millisecond) + } + }) + + if container == nil { 
+ t.Fatal("An error occurred while waiting for the container to start") + } + + return container +} + +func setTimeout(t *testing.T, msg string, d time.Duration, f func()) { + c := make(chan bool) + + // Make sure we are not too long + go func() { + time.Sleep(d) + c <- true + }() + go func() { + f() + c <- false + }() + if <-c && msg != "" { + t.Fatal(msg) + } +} + +func expectPipe(expected string, r io.Reader) error { + o, err := bufio.NewReader(r).ReadString('\n') + if err != nil { + return err + } + if strings.Trim(o, " \r\n") != expected { + return fmt.Errorf("Unexpected output. Expected [%s], received [%s]", expected, o) + } + return nil +} + +func assertPipe(input, output string, r io.Reader, w io.Writer, count int) error { + for i := 0; i < count; i++ { + if _, err := w.Write([]byte(input)); err != nil { + return err + } + if err := expectPipe(output, r); err != nil { + return err + } + } + return nil +} diff --git a/integration/utils_test.go b/integration/utils_test.go index 5c9a61b607110..9479d4296cd81 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -17,7 +17,6 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" "github.com/docker/docker/api/types" - "github.com/docker/docker/builtins" "github.com/docker/docker/daemon" "github.com/docker/docker/daemon/networkdriver/bridge" "github.com/docker/docker/engine" @@ -44,16 +43,11 @@ func mkDaemon(f Fataler) *daemon.Daemon { } func createNamedTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler, name string) (shortId string) { - job := eng.Job("create", name) - if err := job.ImportEnv(config); err != nil { - f.Fatal(err) - } - var outputBuffer = bytes.NewBuffer(nil) - job.Stdout.Add(outputBuffer) - if err := job.Run(); err != nil { + containerId, _, err := getDaemon(eng).ContainerCreate(name, config, &runconfig.HostConfig{}) + if err != nil { f.Fatal(err) } - return engine.Tail(outputBuffer, 1) + return containerId } func 
createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler) (shortId string) { @@ -61,8 +55,7 @@ func createTestContainer(eng *engine.Engine, config *runconfig.Config, f Fataler } func startContainer(eng *engine.Engine, id string, t Fataler) { - job := eng.Job("start", id) - if err := job.Run(); err != nil { + if err := getDaemon(eng).ContainerStart(id, &runconfig.HostConfig{}); err != nil { t.Fatal(err) } } @@ -105,7 +98,7 @@ func containerWaitTimeout(eng *engine.Engine, id string, t Fataler) error { } func containerKill(eng *engine.Engine, id string, t Fataler) { - if err := eng.Job("kill", id).Run(); err != nil { + if err := getDaemon(eng).ContainerKill(id, 0); err != nil { t.Fatal(err) } } @@ -176,10 +169,6 @@ func newTestEngine(t Fataler, autorestart bool, root string) *engine.Engine { eng := engine.New() eng.Logging = false - // Load default plugins - if err := builtins.Register(eng); err != nil { - t.Fatal(err) - } // (This is manually copied and modified from main() until we have a more generic plugin system) cfg := &daemon.Config{ diff --git a/integration/z_final_test.go b/integration/z_final_test.go index 13cd0c3fd4f0a..d6ef2884f2518 100644 --- a/integration/z_final_test.go +++ b/integration/z_final_test.go @@ -1,13 +1,14 @@ package docker import ( - "github.com/docker/docker/utils" "runtime" "testing" + + "github.com/docker/docker/pkg/fileutils" ) func displayFdGoroutines(t *testing.T) { - t.Logf("File Descriptors: %d, Goroutines: %d", utils.GetTotalUsedFds(), runtime.NumGoroutine()) + t.Logf("File Descriptors: %d, Goroutines: %d", fileutils.GetTotalUsedFds(), runtime.NumGoroutine()) } func TestFinal(t *testing.T) { diff --git a/links/links.go b/links/links.go index 0e5e806e57be3..935bff4ae3911 100644 --- a/links/links.go +++ b/links/links.go @@ -6,8 +6,8 @@ import ( "strings" "github.com/docker/docker/daemon/networkdriver/bridge" - "github.com/docker/docker/engine" "github.com/docker/docker/nat" + "github.com/docker/docker/pkg/iptables" 
) type Link struct { @@ -17,10 +17,9 @@ type Link struct { ChildEnvironment []string Ports []nat.Port IsEnabled bool - eng *engine.Engine } -func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}, eng *engine.Engine) (*Link, error) { +func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) (*Link, error) { var ( i int @@ -38,7 +37,6 @@ func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat. ParentIP: parentIP, ChildEnvironment: env, Ports: ports, - eng: eng, } return l, nil @@ -109,8 +107,8 @@ func (l *Link) ToEnv() []string { if l.ChildEnvironment != nil { for _, v := range l.ChildEnvironment { - parts := strings.Split(v, "=") - if len(parts) != 2 { + parts := strings.SplitN(v, "=", 2) + if len(parts) < 2 { continue } // Ignore a few variables that are added during docker build (and not really relevant to linked containers) @@ -146,6 +144,8 @@ func (l *Link) Enable() error { if err := l.toggle("-A", false); err != nil { return err } + // call this on Firewalld reload + iptables.OnReloaded(func() { l.toggle("-I", false) }) l.IsEnabled = true return nil } @@ -155,7 +155,8 @@ func (l *Link) Disable() { // exist in iptables // -D == iptables delete flag l.toggle("-D", true) - + // call this on Firewalld reload + iptables.OnReloaded(func() { l.toggle("-D", true) }) l.IsEnabled = false } diff --git a/links/links_test.go b/links/links_test.go index ba548fc5b3922..e639e2c42e557 100644 --- a/links/links_test.go +++ b/links/links_test.go @@ -2,16 +2,17 @@ package links import ( "fmt" - "github.com/docker/docker/nat" "strings" "testing" + + "github.com/docker/docker/nat" ) func TestLinkNaming(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} - link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports, nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) if err != nil { t.Fatal(err) } @@ -41,7 
+42,7 @@ func TestLinkNew(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} - link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports, nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) if err != nil { t.Fatal(err) } @@ -72,7 +73,7 @@ func TestLinkEnv(t *testing.T) { ports := make(nat.PortSet) ports[nat.Port("6379/tcp")] = struct{}{} - link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) if err != nil { t.Fatal(err) } @@ -115,7 +116,7 @@ func TestLinkMultipleEnv(t *testing.T) { ports[nat.Port("6380/tcp")] = struct{}{} ports[nat.Port("6381/tcp")] = struct{}{} - link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) if err != nil { t.Fatal(err) } @@ -164,7 +165,7 @@ func TestLinkPortRangeEnv(t *testing.T) { ports[nat.Port("6380/tcp")] = struct{}{} ports[nat.Port("6381/tcp")] = struct{}{} - link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports, nil) + link, err := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) if err != nil { t.Fatal(err) } diff --git a/opts/opts.go b/opts/opts.go index df9decf61fa0c..d2c32f13c7229 100644 --- a/opts/opts.go +++ b/opts/opts.go @@ -8,16 +8,16 @@ import ( "regexp" "strings" - "github.com/docker/docker/api" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" - "github.com/docker/docker/utils" ) var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) 
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 + DefaultUnixSocket = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket ) func ListVar(values *[]string, names []string, usage string) { @@ -25,7 +25,7 @@ func ListVar(values *[]string, names []string, usage string) { } func HostListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, api.ValidateHost), names, usage) + flag.Var(newListOptsRef(values, ValidateHost), names, usage) } func IPListVar(values *[]string, names []string, usage string) { @@ -174,7 +174,7 @@ func ValidateEnv(val string) (string, error) { if len(arr) > 1 { return val, nil } - if !utils.DoesEnvExist(val) { + if !doesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil @@ -234,3 +234,21 @@ func ValidateLabel(val string) (string, error) { } return val, nil } + +func ValidateHost(val string) (string, error) { + host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) + if err != nil { + return val, err + } + return host, nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if parts[0] == name { + return true + } + } + return false +} diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go index 7082cd9088277..4d8d260087595 100644 --- a/pkg/archive/archive.go +++ b/pkg/archive/archive.go @@ -388,25 +388,16 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } -func escapeName(name string) string { - escaped := make([]byte, 0) - for i, c := range []byte(name) { - if i 
== 0 && c == '/' { - continue - } - // all printable chars except "-" which is 0x2d - if (0x20 <= c && c <= 0x7E) && c != 0x2d { - escaped = append(escaped, c) - } else { - escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) - } - } - return string(escaped) -} - // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) @@ -457,7 +448,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns) + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) if err != nil { logrus.Debugf("Error matching %s", relFilePath, err) return err @@ -465,7 +456,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } if skip { - if f.IsDir() { + if !exceptions && f.IsDir() { return filepath.SkipDir } return nil diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go index c127b307e2bc0..ae9b5a8cd2520 100644 --- a/pkg/archive/archive_test.go +++ b/pkg/archive/archive_test.go @@ -14,9 +14,150 @@ import ( "testing" "time" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func 
TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.gz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a gzip file.") + } +} + +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") + } +} + +func TestDecompressStreamXz(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.xz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a xz file.") + } +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { 
+ t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of a uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := CmdStream(cmd, nil) @@ -66,6 
+207,315 @@ func TestCmdStreamGood(t *testing.T) { } } +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := path.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := path.Join(tempFolder, "src") + _, err = os.Create(srcFile) + if err != nil { + t.Fatalf("Fail to create the source file") + } + err = UntarPath(srcFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err = UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := path.Join(destFolder, srcFile) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err 
:= ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := path.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := path.Join(destFolder, srcFile) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + 
t.Fatal(nil) + } + destFolder := path.Join(tempFolder, "dest") + invalidSrc := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := 
path.Join(folder, "dest") + src := path.Join(folder, path.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := path.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := path.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + 
dest := path.Join(folder, "dest") + src := path.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { @@ -179,11 +629,56 @@ func TestTarUntar(t *testing.T) { } } +func TestTarUntarWithXattr(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := 
tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} + func TestTarWithOptions(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) @@ -196,8 +691,11 @@ func TestTarWithOptions(t *testing.T) { opts *TarOptions numChanges int }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 1}, + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) @@ -256,6 +754,58 @@ func TestUntarUstarGnuConflict(t *testing.T) { } } +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err 
!= nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") if err != nil { diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go index b33e0fb0055a5..72bc71e06b413 100644 --- a/pkg/archive/archive_windows_test.go +++ b/pkg/archive/archive_windows_test.go @@ -20,7 +20,7 @@ func TestCanonicalTarNameForPath(t *testing.T) { if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have pailed with error. in=%s out=%s", v.in, out) + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) } else if !v.shouldFail && out != v.expected { t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) } diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go index 53ec575b67ab3..290b2dd40222d 100644 --- a/pkg/archive/changes_test.go +++ b/pkg/archive/changes_test.go @@ -6,6 +6,7 @@ import ( "os/exec" "path" "sort" + "syscall" "testing" "time" ) @@ -91,17 +92,130 @@ func createSampleDir(t *testing.T, root string) { } } +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + // Create a folder + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, 
[]byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + // Let's create folders that with have the role of layers with the same data + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Let's modify modtime for dir1 to be sure it's the same for the two layer (to not having false positive) + fi, err := os.Stat(dir1) + if err != nil { + return + } + mtime := fi.ModTime() + stat := fi.Sys().(*syscall.Stat_t) + atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + + layerDir1 := path.Join(layer, "dir1") + os.Chtimes(layerDir1, atime, mtime) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) + + expectedChanges := []Change{ + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), 
changes[i].String()) + } + } +} + // Create an directory, copy it, make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } + defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } + defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) @@ -291,3 +405,41 @@ func TestApplyLayer(t *testing.T) { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} diff --git a/pkg/archive/wrap_test.go b/pkg/archive/wrap_test.go new file mode 100644 index 0000000000000..46ab36697a75b --- /dev/null +++ b/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" 
+ "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', 
Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go index 17d3739d1ac86..49d19175d7d12 100644 --- a/pkg/chrootarchive/archive.go +++ b/pkg/chrootarchive/archive.go @@ -1,6 +1,7 @@ package chrootarchive import ( + "bytes" "encoding/json" "flag" "fmt" @@ -29,7 +30,8 @@ func untar() { var options *archive.TarOptions - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { fatal(err) } @@ -62,28 +64,39 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error } } - // We can't pass the exclude list directly via cmd line - // because we easily overrun the shell max argument list length - // when the full image list is passed (e.g. when this is used - // by `docker load`). Instead we will add the JSON marshalled - // and placed in the env, which has significantly larger - // max size - data, err := json.Marshal(options) - if err != nil { - return fmt.Errorf("Untar json encode: %v", err) - } decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } cmd := reexec.Command("docker-untar", dest) cmd.Stdin = decompressedArchive - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - out, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Untar %s %s", err, out) + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) } return nil } diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go index 45397d38fe7da..f9b5b09707ae6 100644 --- a/pkg/chrootarchive/archive_test.go +++ b/pkg/chrootarchive/archive_test.go @@ -3,11 +3,13 @@ package chrootarchive import ( "bytes" "fmt" + "hash/crc32" "io" "io/ioutil" "os" "path" "path/filepath" + "strings" "testing" "time" @@ -48,6 +50,42 @@ func TestChrootTarUntar(t *testing.T) { } } +// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of +// local images) +func TestChrootUntarWithHugeExcludesList(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + src := filepath.Join(tmpdir, "src") + if err := os.MkdirAll(src, 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { + t.Fatal(err) + } + stream, err := archive.Tar(src, archive.Uncompressed) + if err != nil { + t.Fatal(err) + } + 
dest := filepath.Join(tmpdir, "dest") + if err := os.MkdirAll(dest, 0700); err != nil { + t.Fatal(err) + } + options := &archive.TarOptions{} + //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow + //on most systems when passed via environment or command line arguments + excludes := make([]string, 65534, 65534) + for i := 0; i < 65534; i++ { + excludes[i] = strings.Repeat(string(i), 64) + } + options.ExcludePatterns = excludes + if err := Untar(stream, dest, options); err != nil { + t.Fatal(err) + } +} + func TestChrootUntarEmptyArchive(t *testing.T) { tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") if err != nil { @@ -59,15 +97,15 @@ func TestChrootUntarEmptyArchive(t *testing.T) { } } -func prepareSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { +func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + if makeSymLinks { + if err := os.Symlink(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } @@ -76,8 +114,44 @@ func prepareSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool return totalSize, nil } -func TestChrootTarUntarWithSoftLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSoftLink") +func getHash(filename string) (uint32, error) { + stream, err := ioutil.ReadFile(filename) + if err != nil { + return 0, err + } + hash := crc32.NewIEEE() + hash.Write(stream) + return hash.Sum32(), nil +} + +func compareDirectories(src string, dest string) error { + changes, err := archive.ChangesDirs(dest, 
src) + if err != nil { + return err + } + if len(changes) > 0 { + return fmt.Errorf("Unexpected differences after untar: %v", changes) + } + return nil +} + +func compareFiles(src string, dest string) error { + srcHash, err := getHash(src) + if err != nil { + return err + } + destHash, err := getHash(dest) + if err != nil { + return err + } + if srcHash != destHash { + return fmt.Errorf("%s is different from %s", src, dest) + } + return nil +} + +func TestChrootTarUntarWithSymlink(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") if err != nil { t.Fatal(err) } @@ -93,6 +167,9 @@ func TestChrootTarUntarWithSoftLink(t *testing.T) { if err := TarUntar(src, dest); err != nil { t.Fatal(err) } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } } func TestChrootCopyWithTar(t *testing.T) { @@ -108,19 +185,35 @@ func TestChrootCopyWithTar(t *testing.T) { if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } - dest := filepath.Join(tmpdir, "dest") + // Copy directory + dest := filepath.Join(tmpdir, "dest") if err := CopyWithTar(src, dest); err != nil { t.Fatal(err) } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } + // Copy file srcfile := filepath.Join(src, "file-1") - if err := CopyWithTar(srcfile, dest); err != nil { + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyWithTar(srcfile, destfile); err != nil { t.Fatal(err) } + if err := compareFiles(srcfile, destfile); err != nil { + t.Fatal(err) + } + // Copy symbolic link - linkfile := filepath.Join(src, "file-1-link") - if err := CopyWithTar(linkfile, dest); err != nil { + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); 
err != nil { t.Fatal(err) } } @@ -138,19 +231,32 @@ func TestChrootCopyFileWithTar(t *testing.T) { if _, err := prepareSourceDirectory(10, src, true); err != nil { t.Fatal(err) } - dest := filepath.Join(tmpdir, "dest") + // Copy directory + dest := filepath.Join(tmpdir, "dest") if err := CopyFileWithTar(src, dest); err == nil { t.Fatal("Expected error on copying directory") } + // Copy file srcfile := filepath.Join(src, "file-1") - if err := CopyFileWithTar(srcfile, dest); err != nil { + dest = filepath.Join(tmpdir, "destFile") + destfile := filepath.Join(dest, "file-1") + if err := CopyFileWithTar(srcfile, destfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcfile, destfile); err != nil { t.Fatal(err) } + // Copy symbolic link - linkfile := filepath.Join(src, "file-1-link") - if err := CopyFileWithTar(linkfile, dest); err != nil { + srcLinkfile := filepath.Join(src, "file-1-link") + dest = filepath.Join(tmpdir, "destSymlink") + destLinkfile := filepath.Join(dest, "file-1-link") + if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { + t.Fatal(err) + } + if err := compareFiles(srcLinkfile, destLinkfile); err != nil { t.Fatal(err) } } @@ -188,6 +294,9 @@ func TestChrootUntarPath(t *testing.T) { if err := UntarPath(tarfile, dest); err != nil { t.Fatal(err) } + if err := compareDirectories(src, dest); err != nil { + t.Fatal(err) + } } type slowEmptyTarReader struct { diff --git a/pkg/etchosts/etchosts.go b/pkg/etchosts/etchosts.go index d7edef27f64ad..bef4a480cb07e 100644 --- a/pkg/etchosts/etchosts.go +++ b/pkg/etchosts/etchosts.go @@ -8,16 +8,19 @@ import ( "regexp" ) +// Structure for a single host record type Record struct { Hosts string IP string } +// Writes record to file and returns bytes written or error func (r Record) WriteTo(w io.Writer) (int64, error) { n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) return int64(n), err } +// Default hosts config records slice var defaultContent = []Record{ {Hosts: "localhost", IP: 
"127.0.0.1"}, {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, @@ -27,9 +30,14 @@ var defaultContent = []Record{ {Hosts: "ip6-allrouters", IP: "ff02::2"}, } +// Build function +// path is path to host file string required +// IP, hostname, and domainname set main record leave empty for no master record +// extraContent is an array of extra host records. func Build(path, IP, hostname, domainname string, extraContent []Record) error { content := bytes.NewBuffer(nil) if IP != "" { + //set main record var mainRec Record mainRec.IP = IP if domainname != "" { @@ -41,13 +49,13 @@ func Build(path, IP, hostname, domainname string, extraContent []Record) error { return err } } - + // Write defaultContent slice to buffer for _, r := range defaultContent { if _, err := r.WriteTo(content); err != nil { return err } } - + // Write extra content from function arguments for _, r := range extraContent { if _, err := r.WriteTo(content); err != nil { return err @@ -57,6 +65,10 @@ func Build(path, IP, hostname, domainname string, extraContent []Record) error { return ioutil.WriteFile(path, content.Bytes(), 0644) } +// Update all IP addresses where hostname matches. 
+// path is path to host file +// IP is new IP address +// hostname is hostname to search for to replace IP func Update(path, IP, hostname string) error { old, err := ioutil.ReadFile(path) if err != nil { diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go index 432529765104a..fdafb53c7fefa 100644 --- a/pkg/fileutils/fileutils.go +++ b/pkg/fileutils/fileutils.go @@ -1,27 +1,170 @@ package fileutils import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" "path/filepath" + "strings" "github.com/Sirupsen/logrus" ) -// Matches returns true if relFilePath matches any of the patterns -func Matches(relFilePath string, patterns []string) (bool, error) { - for _, exclude := range patterns { - matched, err := filepath.Match(exclude, relFilePath) +func Exclusion(pattern string) bool { + return pattern[0] == '!' +} + +func Empty(pattern string) bool { + return pattern == "" +} + +// Cleanpatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. 
+ pattern = strings.TrimSpace(pattern) + if Empty(pattern) { + continue + } + if Exclusion(pattern) { + if len(pattern) == 1 { + logrus.Errorf("Illegal exclusion pattern: %s", pattern) + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if Exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// Matches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. 
+func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if Exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) if err != nil { - logrus.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) + logrus.Errorf("Error matching: %s (pattern: %s)", file, pattern) return false, err } - if matched { - if filepath.Clean(relFilePath) == "." { - logrus.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) - continue + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) } - logrus.Debugf("Skipping excluded path: %s", relFilePath) - return true, nil } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + return matched, nil +} + +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. 
+// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) } - return false, nil + return realPath, nil } diff --git a/pkg/fileutils/fileutils_test.go b/pkg/fileutils/fileutils_test.go new file mode 100644 index 0000000000000..ef931684c1281 --- /dev/null +++ b/pkg/fileutils/fileutils_test.go @@ -0,0 +1,357 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + 
t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", 
string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != 
"" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. +func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. 
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := Exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// An empty string should return true from Empty. 
+func TestEmpty(t *testing.T) { + empty := Empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go index c6f13eda27248..b9433dbddcb50 100644 --- a/pkg/graphdb/graphdb.go +++ b/pkg/graphdb/graphdb.go @@ -378,12 +378,22 @@ func (db 
*Database) Purge(id string) (int, error) { tx.Rollback() return -1, err } - changes, err := rows.RowsAffected() if err != nil { return -1, err } + // Clear who's using this id as parent + refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) + if err != nil { + tx.Rollback() + return -1, err + } + refsCount, err := refs.RowsAffected() + if err != nil { + return -1, err + } + // Delete entity if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { tx.Rollback() @@ -394,7 +404,7 @@ func (db *Database) Purge(id string) (int, error) { return -1, err } - return int(changes), nil + return int(changes + refsCount), nil } // Rename an edge for a given path diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go index f22828560c235..1cd223bd9cd89 100644 --- a/pkg/graphdb/graphdb_test.go +++ b/pkg/graphdb/graphdb_test.go @@ -52,7 +52,7 @@ func TestGetRootEntity(t *testing.T) { t.Fatal("Entity should not be nil") } if e.ID() != "0" { - t.Fatalf("Enity id should be 0, got %s", e.ID()) + t.Fatalf("Entity id should be 0, got %s", e.ID()) } } @@ -74,7 +74,7 @@ func TestSetDuplicateEntity(t *testing.T) { t.Fatal(err) } if _, err := db.Set("/foo", "43"); err == nil { - t.Fatalf("Creating an entry with a duplciate path did not cause an error") + t.Fatalf("Creating an entry with a duplicate path did not cause an error") } } @@ -472,8 +472,8 @@ func TestPurgeId(t *testing.T) { db.Set("/webapp", "1") - if db.Refs("1") != 1 { - t.Fatal("Expect reference count to be 1") + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expect reference count to be 1, got %d", c) } db.Set("/db", "2") @@ -484,7 +484,45 @@ func TestPurgeId(t *testing.T) { t.Fatal(err) } if count != 2 { - t.Fatal("Expected 2 references to be removed") + t.Fatalf("Expected 2 references to be removed, got %d", count) + } +} + +// Regression test https://github.com/docker/docker/issues/12334 +func TestPurgeIdRefPaths(t *testing.T) { + db, dbpath := newTestDb(t) + defer 
destroyTestDb(dbpath) + + db.Set("/webapp", "1") + db.Set("/db", "2") + + db.Set("/db/webapp", "1") + + if c := db.Refs("1"); c != 2 { + t.Fatalf("Expected 2 reference for webapp, got %d", c) + } + if c := db.Refs("2"); c != 1 { + t.Fatalf("Expected 1 reference for db, got %d", c) + } + + if rp := db.RefPaths("2"); len(rp) != 1 { + t.Fatalf("Expected 1 reference path for db, got %d", len(rp)) + } + + count, err := db.Purge("2") + if err != nil { + t.Fatal(err) + } + + if count != 2 { + t.Fatalf("Expected 2 rows to be removed, got %d", count) + } + + if c := db.Refs("2"); c != 0 { + t.Fatalf("Expected 0 reference for db, got %d", c) + } + if c := db.Refs("1"); c != 1 { + t.Fatalf("Expected 1 reference for webapp, got %d", c) } } diff --git a/pkg/httputils/httputils.go b/pkg/httputils/httputils.go new file mode 100644 index 0000000000000..1c922240e6f45 --- /dev/null +++ b/pkg/httputils/httputils.go @@ -0,0 +1,26 @@ +package httputils + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/pkg/jsonmessage" +) + +// Request a given URL and return an io.Reader +func Download(url string) (resp *http.Response, err error) { + if resp, err = http.Get(url); err != nil { + return nil, err + } + if resp.StatusCode >= 400 { + return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) + } + return resp, nil +} + +func NewHTTPRequestError(msg string, res *http.Response) error { + return &jsonmessage.JSONError{ + Message: msg, + Code: res.StatusCode, + } +} diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go index 58ff1af639cca..0e542cbad35ae 100644 --- a/pkg/ioutils/readers.go +++ b/pkg/ioutils/readers.go @@ -3,6 +3,8 @@ package ioutils import ( "bytes" "crypto/rand" + "crypto/sha256" + "encoding/hex" "io" "math/big" "sync" @@ -215,3 +217,11 @@ func (r *bufReader) Close() error { } return closer.Close() } + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return 
"sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} diff --git a/pkg/ioutils/writers.go b/pkg/ioutils/writers.go index c0b3608fe6f36..43fdc44ea9686 100644 --- a/pkg/ioutils/writers.go +++ b/pkg/ioutils/writers.go @@ -37,3 +37,24 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { closer: closer, } } + +// Wrap a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go new file mode 100644 index 0000000000000..80d7f7f795452 --- /dev/null +++ b/pkg/ioutils/writers_test.go @@ -0,0 +1,41 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/pkg/iptables/firewalld.go b/pkg/iptables/firewalld.go new file mode 100644 index 0000000000000..3087794131467 --- /dev/null +++ b/pkg/iptables/firewalld.go @@ -0,0 +1,163 @@ +package iptables + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "github.com/godbus/dbus" + "strings" +) + +type IPV string + +const ( + Iptables IPV = "ipv4" + Ip6tables IPV = "ipv6" + Ebtables IPV = "eb" +) +const ( + dbusInterface = "org.fedoraproject.FirewallD1" + dbusPath = "/org/fedoraproject/FirewallD1" +) + +// Conn is a connection to firewalld dbus endpoint. +type Conn struct { + sysconn *dbus.Conn + sysobj *dbus.Object + signal chan *dbus.Signal +} + +var ( + connection *Conn + firewalldRunning bool // is Firewalld service running + onReloaded []*func() // callbacks when Firewalld has been reloaded +) + +func FirewalldInit() { + var err error + + connection, err = newConnection() + + if err != nil { + logrus.Errorf("Failed to connect to D-Bus system bus: %s", err) + } + + firewalldRunning = checkRunning() } + +// New() establishes a connection to the system bus. +func newConnection() (*Conn, error) { + c := new(Conn) + if err := c.initConnection(); err != nil { + return nil, err + } + + return c, nil +} + +// Initialize D-Bus connection. +func (c *Conn) initConnection() error { + var err error + + c.sysconn, err = dbus.SystemBus() + if err != nil { + return err + } + + // This never fails, even if the service is not running atm. 
+ c.sysobj = c.sysconn.Object(dbusInterface, dbus.ObjectPath(dbusPath)) + + rule := fmt.Sprintf("type='signal',path='%s',interface='%s',sender='%s',member='Reloaded'", + dbusPath, dbusInterface, dbusInterface) + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) + + rule = fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'", + dbusInterface) + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) + + c.signal = make(chan *dbus.Signal, 10) + c.sysconn.Signal(c.signal) + go signalHandler() + + return nil +} + +func signalHandler() { + if connection != nil { + for signal := range connection.signal { + if strings.Contains(signal.Name, "NameOwnerChanged") { + firewalldRunning = checkRunning() + dbusConnectionChanged(signal.Body) + } else if strings.Contains(signal.Name, "Reloaded") { + reloaded() + } + } + } +} + +func dbusConnectionChanged(args []interface{}) { + name := args[0].(string) + old_owner := args[1].(string) + new_owner := args[2].(string) + + if name != dbusInterface { + return + } + + if len(new_owner) > 0 { + connectionEstablished() + } else if len(old_owner) > 0 { + connectionLost() + } +} + +func connectionEstablished() { + reloaded() +} + +func connectionLost() { + // Doesn't do anything for now. Libvirt also doesn't react to this. +} + +// call all callbacks +func reloaded() { + for _, pf := range onReloaded { + (*pf)() + } +} + +// add callback +func OnReloaded(callback func()) { + for _, pf := range onReloaded { + if pf == &callback { + return + } + } + onReloaded = append(onReloaded, &callback) +} + +// Call some remote method to see whether the service is actually running. 
+func checkRunning() bool { + var zone string + var err error + + if connection != nil { + err = connection.sysobj.Call(dbusInterface+".getDefaultZone", 0).Store(&zone) + logrus.Infof("Firewalld running: %t", err == nil) + return err == nil + } + logrus.Info("Firewalld not running") + return false +} + +// Firewalld's passthrough method simply passes args through to iptables/ip6tables +func Passthrough(ipv IPV, args ...string) ([]byte, error) { + var output string + + logrus.Debugf("Firewalld passthrough: %s, %s", ipv, args) + err := connection.sysobj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output) + if output != "" { + logrus.Debugf("passthrough output: %s", output) + } + + return []byte(output), err +} diff --git a/pkg/iptables/firewalld_test.go b/pkg/iptables/firewalld_test.go new file mode 100644 index 0000000000000..3896007d646b3 --- /dev/null +++ b/pkg/iptables/firewalld_test.go @@ -0,0 +1,78 @@ +package iptables + +import ( + "net" + "strconv" + "testing" +) + +func TestFirewalldInit(t *testing.T) { + FirewalldInit() +} + +func TestReloaded(t *testing.T) { + var err error + var fwdChain *Chain + + fwdChain, err = NewChain("FWD", "lo", Filter) + if err != nil { + t.Fatal(err) + } + defer fwdChain.Remove() + + // copy-pasted from iptables_test:TestLink + ip1 := net.ParseIP("192.168.1.1") + ip2 := net.ParseIP("192.168.1.2") + port := 1234 + proto := "tcp" + + err = fwdChain.Link(Append, ip1, ip2, port, proto) + if err != nil { + t.Fatal(err) + } else { + // to be re-called again later + OnReloaded(func() { fwdChain.Link(Append, ip1, ip2, port, proto) }) + } + + rule1 := []string{ + "-i", fwdChain.Bridge, + "-o", fwdChain.Bridge, + "-p", proto, + "-s", ip1.String(), + "-d", ip2.String(), + "--dport", strconv.Itoa(port), + "-j", "ACCEPT"} + + if !Exists(fwdChain.Table, fwdChain.Name, rule1...) 
{ + t.Fatalf("rule1 does not exist") + } + + // flush all rules + fwdChain.Remove() + + reloaded() + + // make sure the rules have been recreated + if !Exists(fwdChain.Table, fwdChain.Name, rule1...) { + t.Fatalf("rule1 hasn't been recreated") + } +} + +func TestPassthrough(t *testing.T) { + rule1 := []string{ + "-i", "lo", + "-p", "udp", + "--dport", "123", + "-j", "ACCEPT"} + + if firewalldRunning { + _, err := Passthrough(Iptables, append([]string{"-A"}, rule1...)...) + if err != nil { + t.Fatal(err) + } + if !Exists(Filter, "INPUT", rule1...) { + t.Fatalf("rule1 does not exist") + } + } + +} diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go index f8b3aa769d28e..0cfcca7502231 100644 --- a/pkg/iptables/iptables.go +++ b/pkg/iptables/iptables.go @@ -41,7 +41,7 @@ type ChainError struct { Output []byte } -func (e *ChainError) Error() string { +func (e ChainError) Error() string { return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) } @@ -142,7 +142,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr stri "--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))); err != nil { return err } else if len(output) != 0 { - return &ChainError{Chain: "FORWARD", Output: output} + return ChainError{Chain: "FORWARD", Output: output} } if output, err := Raw("-t", string(Filter), string(action), c.Name, @@ -154,7 +154,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr stri "-j", "ACCEPT"); err != nil { return err } else if len(output) != 0 { - return &ChainError{Chain: "FORWARD", Output: output} + return ChainError{Chain: "FORWARD", Output: output} } if output, err := Raw("-t", string(Nat), string(action), "POSTROUTING", @@ -165,7 +165,7 @@ func (c *Chain) Forward(action Action, ip net.IP, port int, proto, destAddr stri "-j", "MASQUERADE"); err != nil { return err } else if len(output) != 0 { - return &ChainError{Chain: "FORWARD", Output: output} + return ChainError{Chain: 
"FORWARD", Output: output} } return nil @@ -208,7 +208,7 @@ func (c *Chain) Prerouting(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return &ChainError{Chain: "PREROUTING", Output: output} + return ChainError{Chain: "PREROUTING", Output: output} } return nil } @@ -222,7 +222,7 @@ func (c *Chain) Output(action Action, args ...string) error { if output, err := Raw(append(a, "-j", c.Name)...); err != nil { return err } else if len(output) != 0 { - return &ChainError{Chain: "OUTPUT", Output: output} + return ChainError{Chain: "OUTPUT", Output: output} } return nil } @@ -275,6 +275,13 @@ func Exists(table Table, chain string, rule ...string) bool { // Call 'iptables' system command, passing supplied arguments func Raw(args ...string) ([]byte, error) { + if firewalldRunning { + output, err := Passthrough(Iptables, args...) + if err == nil || !strings.Contains(err.Error(), "was not provided by any .service files") { + return output, err + } + + } if err := initCheck(); err != nil { return nil, err diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go index 6244eb01a4fc2..abaa8a73baab6 100644 --- a/pkg/jsonlog/jsonlog_marshalling.go +++ b/pkg/jsonlog/jsonlog_marshalling.go @@ -65,8 +65,7 @@ import ( func (mj *JSONLog) MarshalJSON() ([]byte, error) { var buf bytes.Buffer buf.Grow(1024) - err := mj.MarshalJSONBuf(&buf) - if err != nil { + if err := mj.MarshalJSONBuf(&buf); err != nil { return nil, err } return buf.Bytes(), nil diff --git a/pkg/listenbuffer/buffer.go b/pkg/listenbuffer/buffer.go index 6e3656d2c4fa8..97d622c15fda4 100644 --- a/pkg/listenbuffer/buffer.go +++ b/pkg/listenbuffer/buffer.go @@ -32,7 +32,7 @@ import "net" // NewListenBuffer returns a net.Listener listening on addr with the protocol // passed. The channel passed is used to activate the listenbuffer when the // caller is ready to accept connections. 
-func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, error) { +func NewListenBuffer(proto, addr string, activate <-chan struct{}) (net.Listener, error) { wrapped, err := net.Listen(proto, addr) if err != nil { return nil, err @@ -46,9 +46,9 @@ func NewListenBuffer(proto, addr string, activate chan struct{}) (net.Listener, // defaultListener is the buffered wrapper around the net.Listener type defaultListener struct { - wrapped net.Listener // The net.Listener wrapped by listenbuffer - ready bool // Whether the listenbuffer has been activated - activate chan struct{} // Channel to control activation of the listenbuffer + wrapped net.Listener // The net.Listener wrapped by listenbuffer + ready bool // Whether the listenbuffer has been activated + activate <-chan struct{} // Channel to control activation of the listenbuffer } // Close closes the wrapped socket. diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go index f2da1cd1b91aa..f0d20d99b06c3 100644 --- a/pkg/mflag/flag.go +++ b/pkg/mflag/flag.go @@ -486,8 +486,7 @@ func (f *FlagSet) Set(name, value string) error { if !ok { return fmt.Errorf("no such flag -%v", name) } - err := flag.Value.Set(value) - if err != nil { + if err := flag.Value.Set(value); err != nil { return err } if f.actual == nil { diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go index 40d4e5374c801..1087ece992653 100644 --- a/pkg/namesgenerator/names-generator.go +++ b/pkg/namesgenerator/names-generator.go @@ -308,19 +308,19 @@ var ( // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. 
https://en.wikipedia.org/wiki/Ada_Yonath "yonath", } + + rnd = rand.New(rand.NewSource(time.Now().UnixNano())) ) func GetRandomName(retry int) string { - rand.Seed(time.Now().UnixNano()) - begin: - name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) + name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))]) if name == "boring_wozniak" /* Steve Wozniak is not boring */ { goto begin } if retry > 0 { - name = fmt.Sprintf("%s%d", name, rand.Intn(10)) + name = fmt.Sprintf("%s%d", name, rnd.Intn(10)) } return name } diff --git a/pkg/parsers/filters/parse.go b/pkg/parsers/filters/parse.go index 9c056bb3cf34a..df5486d515801 100644 --- a/pkg/parsers/filters/parse.go +++ b/pkg/parsers/filters/parse.go @@ -58,8 +58,7 @@ func FromParam(p string) (Args, error) { if len(p) == 0 { return args, nil } - err := json.Unmarshal([]byte(p), &args) - if err != nil { + if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil { return nil, err } return args, nil diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go index 5338a0cfb2561..f366fa67a773c 100644 --- a/pkg/pools/pools.go +++ b/pkg/pools/pools.go @@ -1,5 +1,3 @@ -// +build go1.3 - // Package pools provides a collection of pools which provide various // data types with buffers. These can be used to lower the number of // memory allocations and reuse buffers. 
diff --git a/pkg/pools/pools_nopool.go b/pkg/pools/pools_nopool.go deleted file mode 100644 index 48903c2396a27..0000000000000 --- a/pkg/pools/pools_nopool.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build !go1.3 - -package pools - -import ( - "bufio" - "io" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - BufioReader32KPool *BufioReaderPool - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -type BufioReaderPool struct { - size int -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{size: size} -} - -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - return bufio.NewReaderSize(r, bufPool.size) -} - -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) -} - -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - return readCloser.Close() - } - return nil - }) -} - -type BufioWriterPool struct { - size int -} - -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{size: size} -} - -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - return bufio.NewWriterSize(w, bufPool.size) -} - -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) -} - -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - return writeCloser.Close() - } - return nil - }) -} diff --git a/pkg/requestdecorator/requestdecorator_test.go b/pkg/requestdecorator/requestdecorator_test.go index b2c1fb3b97215..ed61135467bd6 100644 --- a/pkg/requestdecorator/requestdecorator_test.go +++ 
b/pkg/requestdecorator/requestdecorator_test.go @@ -1,45 +1,11 @@ package requestdecorator import ( - "encoding/base64" "net/http" "strings" "testing" ) -// The following 2 functions are here for 1.3.3 support -// After we drop 1.3.3 support we can use the functions supported -// in go v1.4.0 + -// BasicAuth returns the username and password provided in the request's -// Authorization header, if the request uses HTTP Basic Authentication. -// See RFC 2617, Section 2. -func basicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - return parseBasicAuth(auth) -} - -// parseBasicAuth parses an HTTP Basic Authentication string. -// "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==" returns ("Aladdin", "open sesame", true). -func parseBasicAuth(auth string) (username, password string, ok bool) { - const prefix = "Basic " - if !strings.HasPrefix(auth, prefix) { - return - } - c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} - func TestUAVersionInfo(t *testing.T) { uavi := NewUAVersionInfo("foo", "bar") if !uavi.isValid() { @@ -147,7 +113,7 @@ func TestAuthDecorator(t *testing.T) { t.Fatal(err) } - username, password, ok := basicAuth(reqDecorated) + username, password, ok := reqDecorated.BasicAuth() if !ok { t.Fatalf("Cannot retrieve basic auth info from request") } @@ -180,8 +146,8 @@ func TestRequestFactory(t *testing.T) { requestFactory := NewRequestFactory(ad, uad) - if dlen := len(requestFactory.GetDecorators()); dlen != 2 { - t.Fatalf("Expected to have two decorators, got %d", dlen) + if l := len(requestFactory.GetDecorators()); l != 2 { + t.Fatalf("Expected to have two decorators, got %d", l) } req, err := requestFactory.NewRequest("GET", "/test", strings.NewReader("test")) @@ -189,7 +155,7 @@ func TestRequestFactory(t *testing.T) { t.Fatal(err) 
} - username, password, ok := basicAuth(req) + username, password, ok := req.BasicAuth() if !ok { t.Fatalf("Cannot retrieve basic auth info from request") } @@ -209,8 +175,8 @@ func TestRequestFactoryNewRequestWithDecorators(t *testing.T) { requestFactory := NewRequestFactory(ad) - if dlen := len(requestFactory.GetDecorators()); dlen != 1 { - t.Fatalf("Expected to have one decorators, got %d", dlen) + if l := len(requestFactory.GetDecorators()); l != 1 { + t.Fatalf("Expected to have one decorators, got %d", l) } ad2 := NewAuthDecorator("test2", "password2") @@ -220,7 +186,7 @@ func TestRequestFactoryNewRequestWithDecorators(t *testing.T) { t.Fatal(err) } - username, password, ok := basicAuth(req) + username, password, ok := req.BasicAuth() if !ok { t.Fatalf("Cannot retrieve basic auth info from request") } @@ -235,15 +201,15 @@ func TestRequestFactoryNewRequestWithDecorators(t *testing.T) { func TestRequestFactoryAddDecorator(t *testing.T) { requestFactory := NewRequestFactory() - if dlen := len(requestFactory.GetDecorators()); dlen != 0 { - t.Fatalf("Expected to have zero decorators, got %d", dlen) + if l := len(requestFactory.GetDecorators()); l != 0 { + t.Fatalf("Expected to have zero decorators, got %d", l) } ad := NewAuthDecorator("test", "password") requestFactory.AddDecorator(ad) - if dlen := len(requestFactory.GetDecorators()); dlen != 1 { - t.Fatalf("Expected to have one decorators, got %d", dlen) + if l := len(requestFactory.GetDecorators()); l != 1 { + t.Fatalf("Expected to have one decorators, got %d", l) } } diff --git a/pkg/resolvconf/resolvconf.go b/pkg/resolvconf/resolvconf.go index d7d53e16d010f..5707b16b7fbf5 100644 --- a/pkg/resolvconf/resolvconf.go +++ b/pkg/resolvconf/resolvconf.go @@ -9,7 +9,7 @@ import ( "sync" "github.com/Sirupsen/logrus" - "github.com/docker/docker/utils" + "github.com/docker/docker/pkg/ioutils" ) var ( @@ -59,7 +59,7 @@ func GetIfChanged() ([]byte, string, error) { if err != nil { return nil, "", err } - newHash, err := 
utils.HashData(bytes.NewReader(resolv)) + newHash, err := ioutils.HashData(bytes.NewReader(resolv)) if err != nil { return nil, "", err } diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go index ccf1d9dbabad0..dbb74e5a20876 100644 --- a/pkg/stdcopy/stdcopy.go +++ b/pkg/stdcopy/stdcopy.go @@ -52,12 +52,8 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) { // and written to the underlying `w` stream. // This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. // `t` indicates the id of the stream to encapsulate. -// It can be utils.Stdin, utils.Stdout, utils.Stderr. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. func NewStdWriter(w io.Writer, t StdType) *StdWriter { - if len(t) != StdWriterPrefixLen { - return nil - } - return &StdWriter{ Writer: w, prefix: t, diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go index 14e6ed3115ce7..a9fd73a49eddc 100644 --- a/pkg/stdcopy/stdcopy_test.go +++ b/pkg/stdcopy/stdcopy_test.go @@ -3,9 +3,74 @@ package stdcopy import ( "bytes" "io/ioutil" + "strings" "testing" ) +func TestNewStdWriter(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + if writer == nil { + t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") + } +} + +func TestWriteWithUnitializedStdWriter(t *testing.T) { + writer := StdWriter{ + Writer: nil, + prefix: Stdout, + sizeBuf: make([]byte, 4), + } + n, err := writer.Write([]byte("Something here")) + if n != 0 || err == nil { + t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter") + } +} + +func TestWriteWithNilBytes(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + n, err := writer.Write(nil) + if err != nil { + t.Fatalf("Shouldn't have failed when given no data") + } + if n > 0 { + t.Fatalf("Write should have written 0 byte, but has written %d", n) + } +} + +func TestWrite(t *testing.T) { + writer := NewStdWriter(ioutil.Discard, Stdout) + data := 
[]byte("Test StdWrite.Write") + n, err := writer.Write(data) + if err != nil { + t.Fatalf("Error while writing with StdWrite") + } + if n != len(data) { + t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n) + } +} + +func TestStdCopyWithInvalidInputHeader(t *testing.T) { + dstOut := NewStdWriter(ioutil.Discard, Stdout) + dstErr := NewStdWriter(ioutil.Discard, Stderr) + src := strings.NewReader("Invalid input") + _, err := StdCopy(dstOut, dstErr, src) + if err == nil { + t.Fatal("StdCopy with invalid input header should fail.") + } +} + +func TestStdCopyWithCorruptedPrefix(t *testing.T) { + data := []byte{0x01, 0x02, 0x03} + src := bytes.NewReader(data) + written, err := StdCopy(nil, nil, src) + if err != nil { + t.Fatalf("StdCopy should not return an error with corrupted prefix.") + } + if written != 0 { + t.Fatalf("StdCopy should have written 0, but has written %d", written) + } +} + func BenchmarkWrite(b *testing.B) { w := NewStdWriter(ioutil.Discard, Stdout) data := []byte("Test line for testing stdwriter performance\n") diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go index 383e7adf9e882..90f2b695d3d93 100644 --- a/pkg/streamformatter/streamformatter.go +++ b/pkg/streamformatter/streamformatter.go @@ -3,8 +3,9 @@ package streamformatter import ( "encoding/json" "fmt" - "github.com/docker/docker/pkg/jsonmessage" "io" + + "github.com/docker/docker/pkg/jsonmessage" ) type StreamFormatter struct { diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go index edc432e900710..1dee05aa6c590 100644 --- a/pkg/streamformatter/streamformatter_test.go +++ b/pkg/streamformatter/streamformatter_test.go @@ -3,9 +3,10 @@ package streamformatter import ( "encoding/json" "errors" - "github.com/docker/docker/pkg/jsonmessage" "reflect" "testing" + + "github.com/docker/docker/pkg/jsonmessage" ) func TestFormatStream(t *testing.T) { diff --git 
a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go index bcb0ece57cd2c..e3ebf5d1ed82b 100644 --- a/pkg/stringutils/stringutils.go +++ b/pkg/stringutils/stringutils.go @@ -1,23 +1,12 @@ package stringutils import ( - "crypto/rand" - "encoding/hex" - "io" + "bytes" mathrand "math/rand" + "strings" "time" ) -// Generate 32 chars random string -func GenerateRandomString() string { - id := make([]byte, 32) - - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - return hex.EncodeToString(id) -} - // Generate alpha only random stirng with length n func GenerateRandomAlphaOnlyString(n int) string { // make a really long string @@ -41,3 +30,57 @@ func GenerateRandomAsciiString(n int) string { } return string(res) } + +// Truncate a string to maxlen +func Truncate(s string, maxlen int) string { + if len(s) <= maxlen { + return s + } + return s[:maxlen] +} + +// Test whether a string is contained in a slice of strings or not. +// Comparison is case insensitive +func InSlice(slice []string, s string) bool { + for _, ss := range slice { + if strings.ToLower(s) == strings.ToLower(ss) { + return true + } + } + return false +} + +func quote(word string, buf *bytes.Buffer) { + // Bail out early for "simple" strings + if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { + buf.WriteString(word) + return + } + + buf.WriteString("'") + + for i := 0; i < len(word); i++ { + b := word[i] + if b == '\'' { + // Replace literal ' with a close ', a \', and a open ' + buf.WriteString("'\\''") + } else { + buf.WriteByte(b) + } + } + + buf.WriteString("'") +} + +// Take a list of strings and escape them so they will be handled right +// when passed as arguments to an program via a shell +func ShellQuoteArguments(args []string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go index 60b848ff5a09f..8dcb4696bb749 100644 --- a/pkg/stringutils/stringutils_test.go +++ b/pkg/stringutils/stringutils_test.go @@ -2,18 +2,19 @@ package stringutils import "testing" -func TestRandomString(t *testing.T) { - str := GenerateRandomString() - if len(str) != 64 { - t.Fatalf("Id returned is incorrect: %s", str) +func testLengthHelper(generator func(int) string, t *testing.T) { + expectedLength := 20 + s := generator(expectedLength) + if len(s) != expectedLength { + t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) } } -func TestRandomStringUniqueness(t *testing.T) { +func testUniquenessHelper(generator func(int) string, t *testing.T) { repeats := 25 set := make(map[string]struct{}, repeats) for i := 0; i < repeats; i = i + 1 { - str := GenerateRandomString() + str := generator(64) if len(str) != 64 { t.Fatalf("Id returned is incorrect: %s", str) } @@ -23,3 +24,64 @@ func TestRandomStringUniqueness(t *testing.T) { set[str] = struct{}{} } } + +func isASCII(s string) bool { + for _, c := range s { + if c > 127 { + return false + } + } + return true +} + +func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { + 
testUniquenessHelper(GenerateRandomAlphaOnlyString, t) +} + +func TestGenerateRandomAsciiStringLength(t *testing.T) { + testLengthHelper(GenerateRandomAsciiString, t) +} + +func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { + testUniquenessHelper(GenerateRandomAsciiString, t) +} + +func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { + str := GenerateRandomAsciiString(64) + if !isASCII(str) { + t.Fatalf("%s contained non-ascii characters", str) + } +} + +func TestTruncate(t *testing.T) { + str := "teststring" + newstr := Truncate(str, 4) + if newstr != "test" { + t.Fatalf("Expected test, got %s", newstr) + } + newstr = Truncate(str, 20) + if newstr != "teststring" { + t.Fatalf("Expected teststring, got %s", newstr) + } +} + +func TestInSlice(t *testing.T) { + slice := []string{"test", "in", "slice"} + + test := InSlice(slice, "test") + if !test { + t.Fatalf("Expected string test to be in slice") + } + test = InSlice(slice, "SLICE") + if !test { + t.Fatalf("Expected string SLICE to be in slice") + } + test = InSlice(slice, "notinslice") + if test { + t.Fatalf("Expected string notinslice not to be in slice") + } +} diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go index 16839bcb4cc16..195a03e9a8ed8 100644 --- a/pkg/sysinfo/sysinfo.go +++ b/pkg/sysinfo/sysinfo.go @@ -13,6 +13,7 @@ import ( type SysInfo struct { MemoryLimit bool SwapLimit bool + CpuCfsQuota bool IPv4ForwardingDisabled bool AppArmor bool } @@ -22,20 +23,28 @@ func New(quiet bool) *SysInfo { sysInfo := &SysInfo{} if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil { if !quiet { - logrus.Warnf("%s", err) + logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err) } } else { - _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.limit_in_bytes")) - _, err2 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.soft_limit_in_bytes")) - sysInfo.MemoryLimit = err1 == nil && err2 == nil - if !sysInfo.MemoryLimit && 
!quiet { - logrus.Warnf("Your kernel does not support cgroup memory limit.") - } + // If memory cgroup is mounted, MemoryLimit is always enabled. + sysInfo.MemoryLimit = true - _, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) - sysInfo.SwapLimit = err == nil + _, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes")) + sysInfo.SwapLimit = err1 == nil if !sysInfo.SwapLimit && !quiet { - logrus.Warnf("Your kernel does not support cgroup swap limit.") + logrus.Warn("Your kernel does not support swap memory limit.") + } + } + + if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil { + if !quiet { + logrus.Warnf("%v", err) + } + } else { + _, err1 := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us")) + sysInfo.CpuCfsQuota = err1 == nil + if !sysInfo.CpuCfsQuota && !quiet { + logrus.Warn("Your kernel does not support cgroup cfs quotas") } } @@ -45,5 +54,11 @@ func New(quiet bool) *SysInfo { } else { sysInfo.AppArmor = true } + + // Check if Devices cgroup is mounted, it is hard requirement for container security. 
+ if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil { + logrus.Fatalf("Error mounting devices cgroup: %v", err) + } + return sysInfo } diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go index a966cd4881b2b..d0e43b3709784 100644 --- a/pkg/system/lstat.go +++ b/pkg/system/lstat.go @@ -12,8 +12,7 @@ import ( // Throws an error if the file does not exist func Lstat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} - err := syscall.Lstat(path, s) - if err != nil { + if err := syscall.Lstat(path, s); err != nil { return nil, err } return fromStatT(s) diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go index 928ba89e698d4..3899b3e0eeac7 100644 --- a/pkg/system/stat_linux.go +++ b/pkg/system/stat_linux.go @@ -20,8 +20,7 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { // Throws an error if the file does not exist func Stat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} - err := syscall.Stat(path, s) - if err != nil { + if err := syscall.Stat(path, s); err != nil { return nil, err } return fromStatT(s) diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go index ae9516c99cf80..d47cf59b8dff7 100644 --- a/pkg/term/tc_linux_cgo.go +++ b/pkg/term/tc_linux_cgo.go @@ -24,6 +24,7 @@ func MakeRaw(fd uintptr) (*State, error) { newState := oldState.termios C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) + newState.Oflag = newState.Oflag | C.OPOST if err := tcset(fd, &newState); err != 0 { return nil, err } diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go index 5b637928faece..f46c9c8acf01b 100644 --- a/pkg/term/term_windows.go +++ b/pkg/term/term_windows.go @@ -5,6 +5,7 @@ import ( "io" "os" + "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/term/winconsole" ) @@ -57,6 +58,7 @@ func GetWinsize(fd uintptr) (*Winsize, error) { // SetWinsize sets the size of the given terminal connected to the passed file descriptor. 
func SetWinsize(fd uintptr, ws *Winsize) error { // TODO(azlinux): Implement SetWinsize + logrus.Debugf("[windows] SetWinsize: WARNING -- Unsupported method invoked") return nil } @@ -120,11 +122,10 @@ func MakeRaw(fd uintptr) (*State, error) { mode &^= winconsole.ENABLE_ECHO_INPUT mode &^= winconsole.ENABLE_LINE_INPUT mode &^= winconsole.ENABLE_MOUSE_INPUT - // TODO(azlinux): Enable window input to handle window resizing - mode |= winconsole.ENABLE_WINDOW_INPUT + mode &^= winconsole.ENABLE_WINDOW_INPUT + mode &^= winconsole.ENABLE_PROCESSED_INPUT // Enable these modes - mode |= winconsole.ENABLE_PROCESSED_INPUT mode |= winconsole.ENABLE_EXTENDED_FLAGS mode |= winconsole.ENABLE_INSERT_MODE mode |= winconsole.ENABLE_QUICK_EDIT_MODE diff --git a/pkg/term/winconsole/console_windows.go b/pkg/term/winconsole/console_windows.go index bebf6d7c11cac..ce40a93167f96 100644 --- a/pkg/term/winconsole/console_windows.go +++ b/pkg/term/winconsole/console_windows.go @@ -12,6 +12,8 @@ import ( "sync" "syscall" "unsafe" + + "github.com/Sirupsen/logrus" ) const ( @@ -410,25 +412,25 @@ func getNumberOfChars(fromCoord COORD, toCoord COORD, screenSize COORD) uint32 { var buffer []CHAR_INFO -func clearDisplayRect(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { +func clearDisplayRect(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) { var writeRegion SMALL_RECT - writeRegion.Top = fromCoord.Y writeRegion.Left = fromCoord.X + writeRegion.Top = fromCoord.Y writeRegion.Right = toCoord.X writeRegion.Bottom = toCoord.Y // allocate and initialize buffer width := toCoord.X - fromCoord.X + 1 height := toCoord.Y - fromCoord.Y + 1 - size := width * height + size := uint32(width) * uint32(height) if size > 0 { - for i := 0; i < int(size); i++ { - buffer[i].UnicodeChar = WCHAR(fillChar) - buffer[i].Attributes = attributes + buffer := make([]CHAR_INFO, size) + for i := range buffer { + buffer[i] = 
CHAR_INFO{WCHAR(' '), attributes} } // Write to buffer - r, err := writeConsoleOutput(handle, buffer[:size], windowSize, COORD{X: 0, Y: 0}, &writeRegion) + r, err := writeConsoleOutput(handle, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &writeRegion) if !r { if err != nil { return 0, err @@ -439,18 +441,18 @@ func clearDisplayRect(handle uintptr, fillChar rune, attributes WORD, fromCoord return uint32(size), nil } -func clearDisplayRange(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { +func clearDisplayRange(handle uintptr, attributes WORD, fromCoord COORD, toCoord COORD) (uint32, error) { nw := uint32(0) // start and end on same line if fromCoord.Y == toCoord.Y { - return clearDisplayRect(handle, fillChar, attributes, fromCoord, toCoord, windowSize) + return clearDisplayRect(handle, attributes, fromCoord, toCoord) } // TODO(azlinux): if full screen, optimize // spans more than one line if fromCoord.Y < toCoord.Y { // from start position till end of line for first line - n, err := clearDisplayRect(handle, fillChar, attributes, fromCoord, COORD{X: windowSize.X - 1, Y: fromCoord.Y}, windowSize) + n, err := clearDisplayRect(handle, attributes, fromCoord, COORD{X: toCoord.X, Y: fromCoord.Y}) if err != nil { return nw, err } @@ -458,14 +460,14 @@ func clearDisplayRange(handle uintptr, fillChar rune, attributes WORD, fromCoord // lines between linesBetween := toCoord.Y - fromCoord.Y - 1 if linesBetween > 0 { - n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: windowSize.X - 1, Y: toCoord.Y - 1}, windowSize) + n, err = clearDisplayRect(handle, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: toCoord.X, Y: toCoord.Y - 1}) if err != nil { return nw, err } nw += n } // lines at end - n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: toCoord.Y}, toCoord, windowSize) + n, err = clearDisplayRect(handle, attributes, COORD{X: 0, 
Y: toCoord.Y}, toCoord) if err != nil { return nw, err } @@ -593,6 +595,7 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) n = len(command) parsedCommand := parseAnsiCommand(command) + logrus.Debugf("[windows] HandleOutputCommand: %v", parsedCommand) // console settings changes need to happen in atomic way term.outMutex.Lock() @@ -648,6 +651,7 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) column = int16(screenBufferInfo.Window.Right) + 1 } // The numbers are not 0 based, but 1 based + logrus.Debugf("[windows] HandleOutputCommmand: Moving cursor to (%v,%v)", column-1, line-1) if err := setConsoleCursorPosition(handle, false, column-1, line-1); err != nil { return n, err } @@ -715,9 +719,9 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) switch value { case 0: start = screenBufferInfo.CursorPosition - // end of the screen - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // end of the buffer + end.X = screenBufferInfo.Size.X - 1 + end.Y = screenBufferInfo.Size.Y - 1 // cursor cursor = screenBufferInfo.CursorPosition case 1: @@ -733,20 +737,21 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) // start of the screen start.X = 0 start.Y = 0 - // end of the screen - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + // end of the buffer + end.X = screenBufferInfo.Size.X - 1 + end.Y = screenBufferInfo.Size.Y - 1 // cursor cursor.X = 0 cursor.Y = 0 } - if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { + if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil { return n, err } // remember the the cursor position is 1 based if err := setConsoleCursorPosition(handle, false, 
int16(cursor.X), int16(cursor.Y)); err != nil { return n, err } + case "K": // [K // Clears all characters from the cursor position to the end of the line (including the character at the cursor position). @@ -766,7 +771,7 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) // start is where cursor is start = screenBufferInfo.CursorPosition // end of line - end.X = screenBufferInfo.MaximumWindowSize.X - 1 + end.X = screenBufferInfo.Size.X - 1 end.Y = screenBufferInfo.CursorPosition.Y // cursor remains the same cursor = screenBufferInfo.CursorPosition @@ -782,15 +787,15 @@ func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) case 2: // start of the line start.X = 0 - start.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + start.Y = screenBufferInfo.CursorPosition.Y - 1 // end of the line - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + end.X = screenBufferInfo.Size.X - 1 + end.Y = screenBufferInfo.CursorPosition.Y - 1 // cursor cursor.X = 0 - cursor.Y = screenBufferInfo.MaximumWindowSize.Y - 1 + cursor.Y = screenBufferInfo.CursorPosition.Y - 1 } - if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { + if _, err := clearDisplayRange(uintptr(handle), term.screenBufferInfo.Attributes, start, end); err != nil { return n, err } // remember the the cursor position is 1 based @@ -1037,8 +1042,7 @@ func (term *WindowsTerminal) HandleInputSequence(fd uintptr, command []byte) (n } func marshal(c COORD) uintptr { - // works only on intel-endian machines - return uintptr(uint32(uint32(uint16(c.Y))<<16 | uint32(uint16(c.X)))) + return uintptr(*((*DWORD)(unsafe.Pointer(&c)))) } // IsConsole returns true if the given file descriptor is a terminal. 
diff --git a/pkg/term/winconsole/console_windows_test.go b/pkg/term/winconsole/console_windows_test.go index ee9d96834b854..edb5d6f66123a 100644 --- a/pkg/term/winconsole/console_windows_test.go +++ b/pkg/term/winconsole/console_windows_test.go @@ -18,7 +18,7 @@ func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail t.Errorf(format, args) } if expectedValue != value { - t.Errorf("The value returned does not macth expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value) + t.Errorf("The value returned does not match expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value) t.Errorf(format, args) } } diff --git a/pkg/term/winconsole/term_emulator.go b/pkg/term/winconsole/term_emulator.go index 8c9f34284d885..2d5edc0390e5f 100644 --- a/pkg/term/winconsole/term_emulator.go +++ b/pkg/term/winconsole/term_emulator.go @@ -1,6 +1,7 @@ package winconsole import ( + "fmt" "io" "strconv" "strings" @@ -206,6 +207,21 @@ func (c *ansiCommand) getParam(index int) string { return "" } +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) { if s == "" { return defaultValue, nil diff --git a/pkg/term/winconsole/term_emulator_test.go b/pkg/term/winconsole/term_emulator_test.go index 65de5a79338e1..94104ff51f2f9 100644 --- a/pkg/term/winconsole/term_emulator_test.go +++ b/pkg/term/winconsole/term_emulator_test.go @@ -138,7 +138,7 @@ func TestAssertEqualBytesNegative(t *testing.T) { AssertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 1, 1}, "content mismatch") }*/ -// Checks that the calls recieved +// Checks that the calls received func assertHandlerOutput(t *testing.T, mock *mockTerminal, plainText 
string, commands ...string) { text := make([]byte, 0, 3*len(plainText)) cmdIndex := 0 diff --git a/pkg/timeoutconn/timeoutconn.go b/pkg/timeoutconn/timeoutconn.go index 3a554559a4a97..d9534b5da7500 100644 --- a/pkg/timeoutconn/timeoutconn.go +++ b/pkg/timeoutconn/timeoutconn.go @@ -17,8 +17,7 @@ type conn struct { func (c *conn) Read(b []byte) (int, error) { if c.timeout > 0 { - err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)) - if err != nil { + if err := c.Conn.SetReadDeadline(time.Now().Add(c.timeout)); err != nil { return 0, err } } diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md index 52a8bf05d6cb8..6ae7baf743017 100644 --- a/project/GOVERNANCE.md +++ b/project/GOVERNANCE.md @@ -4,7 +4,7 @@ In the spirit of openness, Docker created a Governance Advisory Board, and commi All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership. The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at -[Google Docs Folder](http://goo.gl/Alfj8r) +[Google Docs Folder](https://goo.gl/Alfj8r) These include: diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md index 5704b0a2b29c4..d321a900d683d 100644 --- a/project/PACKAGERS.md +++ b/project/PACKAGERS.md @@ -45,9 +45,9 @@ need to package Docker your way, without denaturing it in the process. 
To build Docker, you will need the following: * A recent version of Git and Mercurial -* Go version 1.3 or later +* Go version 1.4 or later * A clean checkout of the source added to a valid [Go - workspace](http://golang.org/doc/code.html#Workspaces) under the path + workspace](https://golang.org/doc/code.html#Workspaces) under the path *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, explained in more detail below) @@ -237,9 +237,9 @@ are as follows (in order): installed at "/usr/bin/docker", then "/usr/bin/dockerinit" will be the first place this file is searched for) * "/usr/libexec/docker/dockerinit" or "/usr/local/libexec/docker/dockerinit" - ([FHS 3.0 Draft](http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) + ([FHS 3.0 Draft](https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec)) * "/usr/lib/docker/dockerinit" or "/usr/local/lib/docker/dockerinit" ([FHS - 2.3](http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) + 2.3](https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA)) If (and please, only if) one of the paths above is insufficient due to distro policy or similar issues, you may use the `DOCKER_INITPATH` environment variable diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md index 10af71c81d2ec..d2b9650805f72 100644 --- a/project/RELEASE-CHECKLIST.md +++ b/project/RELEASE-CHECKLIST.md @@ -49,7 +49,17 @@ git cherry-pick ... ``` -### 2. Update CHANGELOG.md +### 2. Bump the API version on master + +We don't want to stop contributions to master just because we are releasing. At +the same time, now that the release branch exists, we don't want API changes to +go to the now frozen API version. + +Create a new entry in `docs/sources/reference/api/` by copying the latest and +bumping the version number (in both the file's name and content), and submit +this in a PR against master. + +### 3. 
Update CHANGELOG.md You can run this command for reference with git 2.0: @@ -124,7 +134,7 @@ git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf Obviously, you'll need to adjust version numbers as necessary. If you just need a count, add a simple `| wc -l`. -### 3. Change the contents of the VERSION file +### 4. Change the contents of the VERSION file Before the big thing, you'll want to make successive release candidates and get people to test. The release candidate number `N` should be part of the version: @@ -134,7 +144,7 @@ export RC_VERSION=${VERSION}-rcN echo ${RC_VERSION#v} > VERSION ``` -### 4. Test the docs +### 5. Test the docs Make sure that your tree includes documentation for any modified or new features, syntax or semantic changes. @@ -145,7 +155,7 @@ To test locally: make docs ``` -To make a shared test at http://beta-docs.docker.io: +To make a shared test at https://beta-docs.docker.io: (You will need the `awsconfig` file added to the `docs/` dir) @@ -153,7 +163,7 @@ To make a shared test at http://beta-docs.docker.io: make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release ``` -### 5. Commit and create a pull request to the "release" branch +### 6. Commit and create a pull request to the "release" branch ```bash git add VERSION CHANGELOG.md @@ -166,7 +176,7 @@ That last command will give you the proper link to visit to ensure that you open the PR against the "release" branch instead of accidentally against "master" (like so many brave souls before you already have). -### 6. Publish release candidate binaries +### 7. Publish release candidate binaries To run this you will need access to the release credentials. Get them from the Core maintainers. @@ -219,7 +229,7 @@ We recommend announcing the release candidate on: - The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group - Any social media that can bring some attention to the release candidate -### 7. 
Iterate on successive release candidates +### 8. Iterate on successive release candidates Spend several days along with the community explicitly investing time and resources to try and break Docker in every possible way, documenting any @@ -269,7 +279,7 @@ git push -f $GITHUBUSER bump_$VERSION Repeat step 6 to tag the code, publish new binaries, announce availability, and get help testing. -### 8. Finalize the bump branch +### 9. Finalize the bump branch When you're happy with the quality of a release candidate, you can move on and create the real thing. @@ -285,9 +295,9 @@ git commit --amend You will then repeat step 6 to publish the binaries to test -### 9. Get 2 other maintainers to validate the pull request +### 10. Get 2 other maintainers to validate the pull request -### 10. Publish final binaries +### 11. Publish final binaries Once they're tested and reasonably believed to be working, run against get.docker.com: @@ -303,7 +313,7 @@ docker run \ hack/release.sh ``` -### 9. Apply tag +### 12. Apply tag It's very important that we don't make the tag until after the official release is uploaded to get.docker.com! @@ -313,12 +323,12 @@ git tag -a $VERSION -m $VERSION bump_$VERSION git push origin $VERSION ``` -### 10. Go to github to merge the `bump_$VERSION` branch into release +### 13. Go to github to merge the `bump_$VERSION` branch into release Don't forget to push that pretty blue button to delete the leftover branch afterwards! -### 11. Update the docs branch +### 14. 
Update the docs branch If this is a MAJOR.MINOR.0 release, you need to make an branch for the previous release's documentation: @@ -341,7 +351,7 @@ git push -f origin docs make AWS_S3_BUCKET=docs.docker.com BUILD_ROOT=yes DISTRIBUTION_ID=C2K6......FL2F docs-release ``` -The docs will appear on http://docs.docker.com/ (though there may be cached +The docs will appear on https://docs.docker.com/ (though there may be cached versions, so its worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/). For more information about documentation releases, see `docs/README.md`. @@ -350,7 +360,7 @@ distributed CDN system) is flushed. The `make docs-release` command will do this _if_ the `DISTRIBUTION_ID` is set correctly - this will take at least 15 minutes to run and you can check its progress with the CDN Cloudfront Chrome addin. -### 12. Create a new pull request to merge your bump commit back into master +### 15. Create a new pull request to merge your bump commit back into master ```bash git checkout master @@ -364,17 +374,14 @@ echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER: Again, get two maintainers to validate, then merge, then push that pretty blue button to delete your branch. -### 13. Update the API docs and VERSION files +### 16. Update the VERSION files Now that version X.Y.Z is out, time to start working on the next! Update the content of the `VERSION` file to be the next minor (incrementing Y) and add the `-dev` suffix. For example, after 1.5.0 release, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the making"). -Also create a new entry in `docs/sources/reference/api/` by copying the latest -and bumping the version number (in both the file's name and content). - -### 14. Rejoice and Evangelize! +### 17. Rejoice and Evangelize! Congratulations! You're done. 
diff --git a/project/TOOLS.md b/project/TOOLS.md index f057ccd2befd9..79bd28374d244 100644 --- a/project/TOOLS.md +++ b/project/TOOLS.md @@ -14,11 +14,11 @@ we run Docker in Docker to test. Leeroy is a Go application which integrates Jenkins with GitHub pull requests. Leeroy uses -[GitHub hooks](http://developer.github.com/v3/repos/hooks/) +[GitHub hooks](https://developer.github.com/v3/repos/hooks/) to listen for pull request notifications and starts jobs on your Jenkins server. Using the Jenkins [notification plugin][jnp], Leeroy updates the pull request using GitHub's -[status API](http://developer.github.com/v3/repos/statuses/) +[status API](https://developer.github.com/v3/repos/statuses/) with pending, success, failure, or error statuses. The leeroy repository is maintained at diff --git a/registry/auth.go b/registry/auth.go index 51b781dd92a81..1ac1ca984e3da 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -1,46 +1,21 @@ package registry import ( - "encoding/base64" "encoding/json" - "errors" "fmt" "io/ioutil" "net/http" - "os" - "path" "strings" "sync" "time" "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/requestdecorator" ) -const ( - // Where we store the config file - CONFIGFILE = ".dockercfg" -) - -var ( - ErrConfigFileMissing = errors.New("The Auth config file is missing") -) - -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth"` - Email string `json:"email"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -type ConfigFile struct { - Configs map[string]AuthConfig `json:"configs,omitempty"` - rootPath string -} - type RequestAuthorization struct { - authConfig *AuthConfig + authConfig *cliconfig.AuthConfig registryEndpoint *Endpoint resource string scope string @@ -51,7 +26,7 @@ type RequestAuthorization struct { tokenExpiration time.Time } -func NewRequestAuthorization(authConfig *AuthConfig, 
registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization { +func NewRequestAuthorization(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, resource, scope string, actions []string) *RequestAuthorization { return &RequestAuthorization{ authConfig: authConfig, registryEndpoint: registryEndpoint, @@ -116,116 +91,8 @@ func (auth *RequestAuthorization) Authorize(req *http.Request) error { return nil } -// create a base64 encoded auth string to store in config -func encodeAuth(authConfig *AuthConfig) string { - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decode the auth string -func decodeAuth(authStr string) (string, string, error) { - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// load up the auth config information and return values -// FIXME: use the internal golang config parser -func LoadConfig(rootPath string) (*ConfigFile, error) { - configFile := ConfigFile{Configs: make(map[string]AuthConfig), rootPath: rootPath} - confFile := path.Join(rootPath, CONFIGFILE) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - b, err := ioutil.ReadFile(confFile) - if err != nil { - return &configFile, err - } - - if err := json.Unmarshal(b, &configFile.Configs); err != nil { - arr := strings.Split(string(b), "\n") - if 
len(arr) < 2 { - return &configFile, fmt.Errorf("The Auth config file is empty") - } - authConfig := AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return &configFile, err - } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return &configFile, fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] - authConfig.ServerAddress = IndexServerAddress() - // *TODO: Switch to using IndexServerName() instead? - configFile.Configs[IndexServerAddress()] = authConfig - } else { - for k, authConfig := range configFile.Configs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return &configFile, err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.Configs[k] = authConfig - } - } - return &configFile, nil -} - -// save the auth config -func SaveConfig(configFile *ConfigFile) error { - confFile := path.Join(configFile.rootPath, CONFIGFILE) - if len(configFile.Configs) == 0 { - os.Remove(confFile) - return nil - } - - configs := make(map[string]AuthConfig, len(configFile.Configs)) - for k, authConfig := range configFile.Configs { - authCopy := authConfig - - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - configs[k] = authCopy - } - - b, err := json.MarshalIndent(configs, "", "\t") - if err != nil { - return err - } - err = ioutil.WriteFile(confFile, b, 0600) - if err != nil { - return err - } - return nil -} - // Login tries to register/login to the registry server. 
-func Login(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { +func Login(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { // Separates the v2 registry login logic from the v1 logic. if registryEndpoint.Version == APIVersion2 { return loginV2(authConfig, registryEndpoint, factory) @@ -234,7 +101,7 @@ func Login(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *requestd } // loginV1 tries to register/login to the v1 registry server. -func loginV1(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { +func loginV1(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { var ( status string reqBody []byte @@ -347,7 +214,7 @@ func loginV1(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *reques // now, users should create their account through other means like directly from a web page // served by the v2 registry service provider. Whether this will be supported in the future // is to be determined. 
-func loginV2(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { +func loginV2(authConfig *cliconfig.AuthConfig, registryEndpoint *Endpoint, factory *requestdecorator.RequestFactory) (string, error) { logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) var ( err error @@ -380,7 +247,7 @@ func loginV2(authConfig *AuthConfig, registryEndpoint *Endpoint, factory *reques return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) } -func tryV2BasicAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error { +func tryV2BasicAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error { req, err := factory.NewRequest("GET", registryEndpoint.Path(""), nil) if err != nil { return err @@ -401,7 +268,7 @@ func tryV2BasicAuthLogin(authConfig *AuthConfig, params map[string]string, regis return nil } -func tryV2TokenAuthLogin(authConfig *AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error { +func tryV2TokenAuthLogin(authConfig *cliconfig.AuthConfig, params map[string]string, registryEndpoint *Endpoint, client *http.Client, factory *requestdecorator.RequestFactory) error { token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint, client, factory) if err != nil { return err @@ -428,10 +295,10 @@ func tryV2TokenAuthLogin(authConfig *AuthConfig, params map[string]string, regis } // this method matches a auth configuration to a server address or a url -func (config *ConfigFile) ResolveAuthConfig(index *IndexInfo) AuthConfig { +func ResolveAuthConfig(config *cliconfig.ConfigFile, index *IndexInfo) cliconfig.AuthConfig { configKey := 
index.GetAuthConfigKey() // First try the happy case - if c, found := config.Configs[configKey]; found || index.Official { + if c, found := config.AuthConfigs[configKey]; found || index.Official { return c } @@ -450,12 +317,12 @@ func (config *ConfigFile) ResolveAuthConfig(index *IndexInfo) AuthConfig { // Maybe they have a legacy config file, we will iterate the keys converting // them to the new format and testing - for registry, config := range config.Configs { + for registry, ac := range config.AuthConfigs { if configKey == convertToHostname(registry) { - return config + return ac } } // When all else fails, return an empty auth config - return AuthConfig{} + return cliconfig.AuthConfig{} } diff --git a/registry/auth_test.go b/registry/auth_test.go index 9cc299aabe958..71b963a1f1ee7 100644 --- a/registry/auth_test.go +++ b/registry/auth_test.go @@ -3,15 +3,18 @@ package registry import ( "io/ioutil" "os" + "path/filepath" "testing" + + "github.com/docker/docker/cliconfig" ) func TestEncodeAuth(t *testing.T) { - newAuthConfig := &AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := &AuthConfig{} + newAuthConfig := &cliconfig.AuthConfig{Username: "ken", Password: "test", Email: "test@example.com"} + authStr := cliconfig.EncodeAuth(newAuthConfig) + decAuthConfig := &cliconfig.AuthConfig{} var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) + decAuthConfig.Username, decAuthConfig.Password, err = cliconfig.DecodeAuth(authStr) if err != nil { t.Fatal(err) } @@ -26,18 +29,16 @@ func TestEncodeAuth(t *testing.T) { } } -func setupTempConfigFile() (*ConfigFile, error) { +func setupTempConfigFile() (*cliconfig.ConfigFile, error) { root, err := ioutil.TempDir("", "docker-test-auth") if err != nil { return nil, err } - configFile := &ConfigFile{ - rootPath: root, - Configs: make(map[string]AuthConfig), - } + root = filepath.Join(root, cliconfig.CONFIGFILE) + 
configFile := cliconfig.NewConfigFile(root) for _, registry := range []string{"testIndex", IndexServerAddress()} { - configFile.Configs[registry] = AuthConfig{ + configFile.AuthConfigs[registry] = cliconfig.AuthConfig{ Username: "docker-user", Password: "docker-pass", Email: "docker@docker.io", @@ -52,14 +53,14 @@ func TestSameAuthDataPostSave(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.RemoveAll(configFile.rootPath) + defer os.RemoveAll(configFile.Filename()) - err = SaveConfig(configFile) + err = configFile.Save() if err != nil { t.Fatal(err) } - authConfig := configFile.Configs["testIndex"] + authConfig := configFile.AuthConfigs["testIndex"] if authConfig.Username != "docker-user" { t.Fail() } @@ -79,9 +80,9 @@ func TestResolveAuthConfigIndexServer(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.RemoveAll(configFile.rootPath) + defer os.RemoveAll(configFile.Filename()) - indexConfig := configFile.Configs[IndexServerAddress()] + indexConfig := configFile.AuthConfigs[IndexServerAddress()] officialIndex := &IndexInfo{ Official: true, @@ -90,10 +91,10 @@ func TestResolveAuthConfigIndexServer(t *testing.T) { Official: false, } - resolved := configFile.ResolveAuthConfig(officialIndex) + resolved := ResolveAuthConfig(configFile, officialIndex) assertEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to return IndexServerAddress()") - resolved = configFile.ResolveAuthConfig(privateIndex) + resolved = ResolveAuthConfig(configFile, privateIndex) assertNotEqual(t, resolved, indexConfig, "Expected ResolveAuthConfig to not return IndexServerAddress()") } @@ -102,26 +103,26 @@ func TestResolveAuthConfigFullURL(t *testing.T) { if err != nil { t.Fatal(err) } - defer os.RemoveAll(configFile.rootPath) + defer os.RemoveAll(configFile.Filename()) - registryAuth := AuthConfig{ + registryAuth := cliconfig.AuthConfig{ Username: "foo-user", Password: "foo-pass", Email: "foo@example.com", } - localAuth := AuthConfig{ + localAuth := cliconfig.AuthConfig{ 
Username: "bar-user", Password: "bar-pass", Email: "bar@example.com", } - officialAuth := AuthConfig{ + officialAuth := cliconfig.AuthConfig{ Username: "baz-user", Password: "baz-pass", Email: "baz@example.com", } - configFile.Configs[IndexServerAddress()] = officialAuth + configFile.AuthConfigs[IndexServerAddress()] = officialAuth - expectedAuths := map[string]AuthConfig{ + expectedAuths := map[string]cliconfig.AuthConfig{ "registry.example.com": registryAuth, "localhost:8000": localAuth, "registry.com": localAuth, @@ -157,13 +158,13 @@ func TestResolveAuthConfigFullURL(t *testing.T) { Name: configKey, } for _, registry := range registries { - configFile.Configs[registry] = configured - resolved := configFile.ResolveAuthConfig(index) + configFile.AuthConfigs[registry] = configured + resolved := ResolveAuthConfig(configFile, index) if resolved.Email != configured.Email { t.Errorf("%s -> %q != %q\n", registry, resolved.Email, configured.Email) } - delete(configFile.Configs, registry) - resolved = configFile.ResolveAuthConfig(index) + delete(configFile.AuthConfigs, registry) + resolved = ResolveAuthConfig(configFile, index) if resolved.Email == configured.Email { t.Errorf("%s -> %q == %q\n", registry, resolved.Email, configured.Email) } diff --git a/registry/config.go b/registry/config.go index 3515836d187af..a0a978cc72832 100644 --- a/registry/config.go +++ b/registry/config.go @@ -9,9 +9,9 @@ import ( "regexp" "strings" + "github.com/docker/docker/image" "github.com/docker/docker/opts" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/utils" ) // Options holds command line options. 
@@ -213,7 +213,7 @@ func validateRemoteName(remoteName string) error { name = nameParts[0] // the repository name must not be a valid image ID - if err := utils.ValidateID(name); err == nil { + if err := image.ValidateID(name); err == nil { return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) } } else { diff --git a/registry/endpoint.go b/registry/endpoint.go index 69a718e12f5c9..84b11a987bda8 100644 --- a/registry/endpoint.go +++ b/registry/endpoint.go @@ -11,8 +11,8 @@ import ( "strings" "github.com/Sirupsen/logrus" + "github.com/docker/distribution/registry/api/v2" "github.com/docker/docker/pkg/requestdecorator" - "github.com/docker/docker/registry/v2" ) // for mocking in unit tests diff --git a/registry/registry_test.go b/registry/registry_test.go index a066de9f8e6ea..3f63eb6e257fa 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/requestdecorator" ) @@ -20,7 +21,7 @@ const ( ) func spawnTestRegistrySession(t *testing.T) *Session { - authConfig := &AuthConfig{} + authConfig := &cliconfig.AuthConfig{} endpoint, err := NewEndpoint(makeIndex("/v1/")) if err != nil { t.Fatal(err) @@ -33,7 +34,7 @@ func spawnTestRegistrySession(t *testing.T) *Session { } func TestPublicSession(t *testing.T) { - authConfig := &AuthConfig{} + authConfig := &cliconfig.AuthConfig{} getSessionDecorators := func(index *IndexInfo) int { endpoint, err := NewEndpoint(index) @@ -735,7 +736,7 @@ func TestSearchRepositories(t *testing.T) { } assertEqual(t, results.NumResults, 1, "Expected 1 search results") assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") - assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' a ot hae 42 stars") + assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") } func TestValidRemoteName(t *testing.T) { diff --git 
a/registry/service.go b/registry/service.go index cf29732f4903c..87fc1d076f621 100644 --- a/registry/service.go +++ b/registry/service.go @@ -1,5 +1,7 @@ package registry +import "github.com/docker/docker/cliconfig" + type Service struct { Config *ServiceConfig } @@ -15,7 +17,7 @@ func NewService(options *Options) *Service { // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was sucessful. // It can be used to verify the validity of a client's credentials. -func (s *Service) Auth(authConfig *AuthConfig) (string, error) { +func (s *Service) Auth(authConfig *cliconfig.AuthConfig) (string, error) { addr := authConfig.ServerAddress if addr == "" { // Use the official registry address if not specified. @@ -35,7 +37,7 @@ func (s *Service) Auth(authConfig *AuthConfig) (string, error) { // Search queries the public registry for images matching the specified // search terms, and returns the results. -func (s *Service) Search(term string, authConfig *AuthConfig, headers map[string][]string) (*SearchResults, error) { +func (s *Service) Search(term string, authConfig *cliconfig.AuthConfig, headers map[string][]string) (*SearchResults, error) { repoInfo, err := s.ResolveRepository(term) if err != nil { return nil, err diff --git a/registry/session.go b/registry/session.go index 4682a5074cd15..e65f82cd6103e 100644 --- a/registry/session.go +++ b/registry/session.go @@ -18,21 +18,21 @@ import ( "time" "github.com/Sirupsen/logrus" + "github.com/docker/docker/cliconfig" "github.com/docker/docker/pkg/httputils" "github.com/docker/docker/pkg/requestdecorator" "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/utils" ) type Session struct { - authConfig *AuthConfig + authConfig *cliconfig.AuthConfig reqFactory *requestdecorator.RequestFactory indexEndpoint *Endpoint jar *cookiejar.Jar timeout TimeoutType } -func NewSession(authConfig *AuthConfig, factory *requestdecorator.RequestFactory, endpoint *Endpoint, timeout 
bool) (r *Session, err error) { +func NewSession(authConfig *cliconfig.AuthConfig, factory *requestdecorator.RequestFactory, endpoint *Endpoint, timeout bool) (r *Session, err error) { r = &Session{ authConfig: authConfig, indexEndpoint: endpoint, @@ -54,7 +54,7 @@ func NewSession(authConfig *AuthConfig, factory *requestdecorator.RequestFactory if err != nil { return nil, err } - if info.Standalone { + if info.Standalone && authConfig != nil && factory != nil { logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", r.indexEndpoint.String()) dec := requestdecorator.NewAuthDecorator(authConfig.Username, authConfig.Password) factory.AddDecorator(dec) @@ -86,7 +86,7 @@ func (r *Session) GetRemoteHistory(imgID, registry string, token []string) ([]st if res.StatusCode == 401 { return nil, errLoginRequired } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) } jsonString, err := ioutil.ReadAll(res.Body) @@ -115,7 +115,7 @@ func (r *Session) LookupRemoteImage(imgID, registry string, token []string) erro } res.Body.Close() if res.StatusCode != 200 { - return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } return nil } @@ -134,7 +134,7 @@ func (r *Session) GetRemoteImageJSON(imgID, registry string, token []string) ([] } defer res.Body.Close() if res.StatusCode != 200 { - return nil, -1, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) + return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) } // if the size header is not present, then set it to '-1' imageSize := -1 @@ -223,11 +223,12 @@ func (r *Session) GetRemoteTags(registries 
[]string, repository string, token [] logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) defer res.Body.Close() - if res.StatusCode != 200 && res.StatusCode != 404 { - continue - } else if res.StatusCode == 404 { + if res.StatusCode == 404 { return nil, fmt.Errorf("Repository not found") } + if res.StatusCode != 200 { + continue + } result := make(map[string]string) if err := json.NewDecoder(res.Body).Decode(&result); err != nil { @@ -282,13 +283,13 @@ func (r *Session) GetRepositoryData(remote string) (*RepositoryData, error) { // TODO: Right now we're ignoring checksums in the response body. // In the future, we need to use them to check image validity. if res.StatusCode == 404 { - return nil, utils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) } else if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, remote, errBody), res) } var tokens []string @@ -379,12 +380,12 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } defer res.Body.Close() if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return utils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) + return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) } if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", 
res.StatusCode, err), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } var jsonBody map[string]string if err := json.Unmarshal(errBody, &jsonBody); err != nil { @@ -392,7 +393,7 @@ func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, regist } else if jsonBody["error"] == "Image already exists" { return ErrAlreadyExists } - return utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) } return nil } @@ -432,9 +433,9 @@ func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry if res.StatusCode != 200 { errBody, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", utils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) } - return "", "", utils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) + return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) } checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) @@ -461,7 +462,7 @@ func (r *Session) PushRegistryTag(remote, revision, tag, registry string, token } res.Body.Close() if res.StatusCode != 200 && res.StatusCode != 201 { - return utils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d 
trying to push tag %s on %s", res.StatusCode, tag, remote), res) } return nil } @@ -523,23 +524,21 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote, errBody), res) } - if res.Header.Get("X-Docker-Token") != "" { - tokens = res.Header["X-Docker-Token"] - logrus.Debugf("Auth token: %v", tokens) - } else { + if res.Header.Get("X-Docker-Token") == "" { return nil, fmt.Errorf("Index response didn't contain an access token") } + tokens = res.Header["X-Docker-Token"] + logrus.Debugf("Auth token: %v", tokens) - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) - if err != nil { - return nil, err - } - } else { + if res.Header.Get("X-Docker-Endpoints") == "" { return nil, fmt.Errorf("Index response didn't contain any endpoints") } + endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) + if err != nil { + return nil, err + } } if validate { if res.StatusCode != 204 { @@ -547,7 +546,7 @@ func (r *Session) PushImageJSONIndex(remote string, imgList []*ImgData, validate if err != nil { logrus.Debugf("Error reading response body: %s", err) } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote, errBody), res) } } @@ -595,19 +594,18 @@ func (r *Session) SearchRepositories(term string) (*SearchResults, error) { } defer res.Body.Close() 
if res.StatusCode != 200 { - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) } result := new(SearchResults) - err = json.NewDecoder(res.Body).Decode(result) - return result, err + return result, json.NewDecoder(res.Body).Decode(result) } -func (r *Session) GetAuthConfig(withPasswd bool) *AuthConfig { +func (r *Session) GetAuthConfig(withPasswd bool) *cliconfig.AuthConfig { password := "" if withPasswd { password = r.authConfig.Password } - return &AuthConfig{ + return &cliconfig.AuthConfig{ Username: r.authConfig.Username, Password: password, Email: r.authConfig.Email, diff --git a/registry/session_v2.go b/registry/session_v2.go index a01c8b9ab2441..4188e505bda51 100644 --- a/registry/session_v2.go +++ b/registry/session_v2.go @@ -11,8 +11,8 @@ import ( "github.com/Sirupsen/logrus" "github.com/docker/distribution/digest" - "github.com/docker/docker/registry/v2" - "github.com/docker/docker/utils" + "github.com/docker/distribution/registry/api/v2" + "github.com/docker/docker/pkg/httputils" ) const DockerDigestHeader = "Docker-Content-Digest" @@ -95,7 +95,7 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au } else if res.StatusCode == 404 { return nil, "", ErrDoesNotExist } - return nil, "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) + return nil, "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s:%s", res.StatusCode, imageName, tagName), res) } manifestBytes, err := ioutil.ReadAll(res.Body) @@ -109,8 +109,8 @@ func (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, au // - Succeeded to head image blob (already exists) // - Failed with no error (continue to Push the Blob) // - Failed with error -func (r *Session) HeadV2ImageBlob(ep 
*Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) { - routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) +func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (bool, error) { + routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst) if err != nil { return false, err } @@ -141,11 +141,11 @@ func (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, return false, nil } - return false, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s:%s", res.StatusCode, imageName, sumType, sum), res) + return false, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying head request for %s - %s", res.StatusCode, imageName, dgst), res) } -func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error { - routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) +func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobWrtr io.Writer, auth *RequestAuthorization) error { + routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst) if err != nil { return err } @@ -168,15 +168,15 @@ func (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, b if res.StatusCode == 401 { return errLoginRequired } - return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob", res.StatusCode, imageName), res) } _, err = io.Copy(blobWrtr, res.Body) return err } -func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) { - routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+":"+sum) +func (r *Session) GetV2ImageBlobReader(ep 
*Endpoint, imageName string, dgst digest.Digest, auth *RequestAuthorization) (io.ReadCloser, int64, error) { + routeURL, err := getV2Builder(ep).BuildBlobURL(imageName, dgst) if err != nil { return nil, 0, err } @@ -198,7 +198,7 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str if res.StatusCode == 401 { return nil, 0, errLoginRequired } - return nil, 0, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s:%s", res.StatusCode, imageName, sumType, sum), res) + return nil, 0, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to pull %s blob - %s", res.StatusCode, imageName, dgst), res) } lenStr := res.Header.Get("Content-Length") l, err := strconv.ParseInt(lenStr, 10, 64) @@ -212,7 +212,7 @@ func (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum str // Push the image to the server for storage. // 'layer' is an uncompressed reader of the blob to be pushed. // The server will generate it's own checksum calculation. 
-func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error { +func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName string, dgst digest.Digest, blobRdr io.Reader, auth *RequestAuthorization) error { location, err := r.initiateBlobUpload(ep, imageName, auth) if err != nil { return err @@ -225,7 +225,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string return err } queryParams := req.URL.Query() - queryParams.Add("digest", sumType+":"+sumStr) + queryParams.Add("digest", dgst.String()) req.URL.RawQuery = queryParams.Encode() if err := auth.Authorize(req); err != nil { return err @@ -245,7 +245,7 @@ func (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string return err } logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) - return utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s:%s", res.StatusCode, imageName, sumType, sumStr), res) + return httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s blob - %s", res.StatusCode, imageName, dgst), res) } return nil @@ -286,7 +286,7 @@ func (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *Reque } logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) - return "", utils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res) + return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: unexpected %d response status trying to initiate upload of %s", res.StatusCode, imageName), res) } if location = res.Header.Get("Location"); location == "" { @@ -328,7 +328,7 @@ func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, si return "", err } logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header) - return "", 
utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) + return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res) } hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader)) @@ -384,13 +384,11 @@ func (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestA } else if res.StatusCode == 404 { return nil, ErrDoesNotExist } - return nil, utils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) + return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch for %s", res.StatusCode, imageName), res) } - decoder := json.NewDecoder(res.Body) var remote remoteTags - err = decoder.Decode(&remote) - if err != nil { + if err := json.NewDecoder(res.Body).Decode(&remote); err != nil { return nil, fmt.Errorf("Error while decoding the http response: %s", err) } return remote.Tags, nil diff --git a/registry/v2/descriptors.go b/registry/v2/descriptors.go deleted file mode 100644 index 68d182411d9c1..0000000000000 --- a/registry/v2/descriptors.go +++ /dev/null @@ -1,144 +0,0 @@ -package v2 - -import "net/http" - -// TODO(stevvooe): Add route descriptors for each named route, along with -// accepted methods, parameters, returned status codes and error codes. - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. 
- Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCodes provides a list of status under which this error - // condition may arise. If it is empty, the error condition may be seen - // for any status code. - HTTPStatusCodes []int -} - -// ErrorDescriptors provides a list of HTTP API Error codes that may be -// encountered when interacting with the registry API. -var ErrorDescriptors = []ErrorDescriptor{ - { - Code: ErrorCodeUnknown, - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - }, - { - Code: ErrorCodeDigestInvalid, - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeSizeInvalid, - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. 
If they do not match, this error - will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeNameInvalid, - Value: "NAME_INVALID", - Message: "manifest name did not match URI", - Description: `During a manifest upload, if the name in the manifest - does not match the uri name, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeTagInvalid, - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - { - Code: ErrorCodeNameUnknown, - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestUnknown, - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, - { - Code: ErrorCodeManifestInvalid, - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. 
The detail will contain information - the failed validation.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeManifestUnverified, - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCodes: []int{http.StatusBadRequest}, - }, - { - Code: ErrorCodeBlobUnknown, - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, - }, - - { - Code: ErrorCodeBlobUploadUnknown, - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCodes: []int{http.StatusNotFound}, - }, -} - -var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor -var idToDescriptors map[string]ErrorDescriptor - -func init() { - errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(ErrorDescriptors)) - idToDescriptors = make(map[string]ErrorDescriptor, len(ErrorDescriptors)) - - for _, descriptor := range ErrorDescriptors { - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - } -} diff --git a/registry/v2/regexp.go b/registry/v2/regexp.go deleted file mode 100644 index 07484dcd69538..0000000000000 --- a/registry/v2/regexp.go +++ /dev/null @@ -1,22 +0,0 @@ -package v2 - -import "regexp" - -// This file defines regular expressions for use in route definition. These -// are also defined in the registry code base. Until they are in a common, -// shared location, and exported, they must be repeated here. 
- -// RepositoryNameComponentRegexp restricts registtry path components names to -// start with at least two letters or numbers, with following parts able to -// separated by one period, dash or underscore. -var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) - -// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow 1 to -// 5 path components, separated by a forward slash. -var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/){0,4}` + RepositoryNameComponentRegexp.String()) - -// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. -var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+`) diff --git a/registry/v2/routes.go b/registry/v2/routes.go deleted file mode 100644 index de0a38fb815a5..0000000000000 --- a/registry/v2/routes.go +++ /dev/null @@ -1,66 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - router := mux.NewRouter(). - StrictSlash(true) - - // GET /v2/ Check Check that the registry implements API version 2(.1) - router. - Path("/v2/"). 
- Name(RouteNameBase) - - // GET /v2//manifest/ Image Manifest Fetch the image manifest identified by name and reference where reference can be a tag or digest. - // PUT /v2//manifest/ Image Manifest Upload the image manifest identified by name and reference where reference can be a tag or digest. - // DELETE /v2//manifest/ Image Manifest Delete the image identified by name and reference where reference can be a tag or digest. - router. - Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + DigestRegexp.String() + "}"). - Name(RouteNameManifest) - - // GET /v2//tags/list Tags Fetch the tags under the repository identified by name. - router. - Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list"). - Name(RouteNameTags) - - // GET /v2//blob/ Layer Fetch the blob identified by digest. - router. - Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:[a-zA-Z0-9-_+.]+:[a-zA-Z0-9-_+.=]+}"). - Name(RouteNameBlob) - - // POST /v2//blob/upload/ Layer Upload Initiate an upload of the layer identified by tarsum. - router. - Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/"). - Name(RouteNameBlobUpload) - - // GET /v2//blob/upload/ Layer Upload Get the status of the upload identified by tarsum and uuid. - // PUT /v2//blob/upload/ Layer Upload Upload all or a chunk of the upload identified by tarsum and uuid. - // DELETE /v2//blob/upload/ Layer Upload Cancel the upload identified by layer and uuid - router. - Path("/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}"). 
- Name(RouteNameBlobUploadChunk) - - return router -} diff --git a/registry/v2/urls_test.go b/registry/v2/urls_test.go deleted file mode 100644 index f30c96c0affdf..0000000000000 --- a/registry/v2/urls_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package v2 - -import ( - "net/url" - "testing" -) - -type urlBuilderTestCase struct { - description string - expectedPath string - build func() (string, error) -} - -// TestURLBuilder tests the various url building functions, ensuring they are -// returning the expected values. -func TestURLBuilder(t *testing.T) { - var ( - urlBuilder *URLBuilder - err error - ) - - testCases := []urlBuilderTestCase{ - { - description: "test base url", - expectedPath: "/v2/", - build: func() (string, error) { - return urlBuilder.BuildBaseURL() - }, - }, - { - description: "test tags url", - expectedPath: "/v2/foo/bar/tags/list", - build: func() (string, error) { - return urlBuilder.BuildTagsURL("foo/bar") - }, - }, - { - description: "test manifest url", - expectedPath: "/v2/foo/bar/manifests/tag", - build: func() (string, error) { - return urlBuilder.BuildManifestURL("foo/bar", "tag") - }, - }, - { - description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", - build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") - }, - }, - { - description: "build blob upload url", - expectedPath: "/v2/foo/bar/blobs/uploads/", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar") - }, - }, - { - description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ - "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, - }) - }, - }, - { - description: "build blob upload chunk url", - expectedPath: 
"/v2/foo/bar/blobs/uploads/uuid-part", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") - }, - }, - { - description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", - build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ - "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, - }) - }, - }, - } - - roots := []string{ - "http://example.com", - "https://example.com", - "http://localhost:5000", - "https://localhost:5443", - } - - for _, root := range roots { - urlBuilder, err = NewURLBuilderFromString(root) - if err != nil { - t.Fatalf("unexpected error creating urlbuilder: %v", err) - } - - for _, testCase := range testCases { - url, err := testCase.build() - if err != nil { - t.Fatalf("%s: error building url: %v", testCase.description, err) - } - - expectedURL := root + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) - } - } - } -} diff --git a/runconfig/compare.go b/runconfig/compare.go index 60a21a79c0b16..1d969e9be88e7 100644 --- a/runconfig/compare.go +++ b/runconfig/compare.go @@ -10,25 +10,25 @@ func Compare(a, b *Config) bool { if a.AttachStdout != b.AttachStdout || a.AttachStderr != b.AttachStderr || a.User != b.User || - a.Memory != b.Memory || - a.MemorySwap != b.MemorySwap || - a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || a.Tty != b.Tty { return false } - if len(a.Cmd) != len(b.Cmd) || + + if a.Cmd.Len() != b.Cmd.Len() || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || + a.Entrypoint.Len() != b.Entrypoint.Len() || len(a.Volumes) != len(b.Volumes) { return false } - for i := 0; i 
< len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { + aCmd := a.Cmd.Slice() + bCmd := b.Cmd.Slice() + for i := 0; i < len(aCmd); i++ { + if aCmd[i] != bCmd[i] { return false } } @@ -52,8 +52,11 @@ func Compare(a, b *Config) bool { return false } } - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { + + aEntrypoint := a.Entrypoint.Slice() + bEntrypoint := b.Entrypoint.Slice() + for i := 0; i < len(aEntrypoint); i++ { + if aEntrypoint[i] != bEntrypoint[i] { return false } } diff --git a/runconfig/config.go b/runconfig/config.go index 45255e9b01ebd..844958be2c3ca 100644 --- a/runconfig/config.go +++ b/runconfig/config.go @@ -1,10 +1,103 @@ package runconfig import ( - "github.com/docker/docker/engine" + "encoding/json" + "io" + "github.com/docker/docker/nat" ) +// Entrypoint encapsulates the container entrypoint. +// It might be represented as a string or an array of strings. +// We need to override the json decoder to accept both options. +// The JSON decoder will fail if the api sends a string and +// we try to decode it into an array of string. +type Entrypoint struct { + parts []string +} + +func (e *Entrypoint) MarshalJSON() ([]byte, error) { + if e == nil { + return []byte{}, nil + } + return json.Marshal(e.Slice()) +} + +// UnmarshalJSON decodes the entrypoint whether it's a string or an array of strings.
+func (e *Entrypoint) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + p = append(p, string(b)) + } + e.parts = p + return nil +} + +func (e *Entrypoint) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +func (e *Entrypoint) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +func NewEntrypoint(parts ...string) *Entrypoint { + return &Entrypoint{parts} +} + +type Command struct { + parts []string +} + +func (e *Command) MarshalJSON() ([]byte, error) { + if e == nil { + return []byte{}, nil + } + return json.Marshal(e.Slice()) +} + +// UnmarshalJSON decodes the command whether it's a string or an array of strings. +func (e *Command) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + p = append(p, string(b)) + } + e.parts = p + return nil +} + +func (e *Command) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +func (e *Command) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +func NewCommand(parts ...string) *Command { + return &Command{parts} +} + // Note: the Config structure should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. @@ -12,10 +105,6 @@ type Config struct { Hostname string Domainname string User string - Memory int64 // FIXME: we keep it for backward compatibility, it has been moved to hostConfig. - MemorySwap int64 // FIXME: it has been moved to hostConfig. - CpuShares int64 // FIXME: it has been moved to hostConfig. - Cpuset string // FIXME: it has been moved to hostConfig and renamed to CpusetCpus.
AttachStdin bool AttachStdout bool AttachStderr bool @@ -25,53 +114,37 @@ type Config struct { OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string - Cmd []string + Cmd *Command Image string // Name of the image as it was passed by the operator (eg. could be symbolic) Volumes map[string]struct{} WorkingDir string - Entrypoint []string + Entrypoint *Entrypoint NetworkDisabled bool MacAddress string OnBuild []string Labels map[string]string } -func ContainerConfigFromJob(job *engine.Job) *Config { - config := &Config{ - Hostname: job.Getenv("Hostname"), - Domainname: job.Getenv("Domainname"), - User: job.Getenv("User"), - Memory: job.GetenvInt64("Memory"), - MemorySwap: job.GetenvInt64("MemorySwap"), - CpuShares: job.GetenvInt64("CpuShares"), - Cpuset: job.Getenv("Cpuset"), - AttachStdin: job.GetenvBool("AttachStdin"), - AttachStdout: job.GetenvBool("AttachStdout"), - AttachStderr: job.GetenvBool("AttachStderr"), - Tty: job.GetenvBool("Tty"), - OpenStdin: job.GetenvBool("OpenStdin"), - StdinOnce: job.GetenvBool("StdinOnce"), - Image: job.Getenv("Image"), - WorkingDir: job.Getenv("WorkingDir"), - NetworkDisabled: job.GetenvBool("NetworkDisabled"), - MacAddress: job.Getenv("MacAddress"), - } - job.GetenvJson("ExposedPorts", &config.ExposedPorts) - job.GetenvJson("Volumes", &config.Volumes) - if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { - config.PortSpecs = PortSpecs - } - if Env := job.GetenvList("Env"); Env != nil { - config.Env = Env - } - if Cmd := job.GetenvList("Cmd"); Cmd != nil { - config.Cmd = Cmd +type ContainerConfigWrapper struct { + *Config + *hostConfigWrapper +} + +func (c ContainerConfigWrapper) HostConfig() *HostConfig { + if c.hostConfigWrapper == nil { + return new(HostConfig) } - job.GetenvJson("Labels", &config.Labels) + return c.hostConfigWrapper.GetHostConfig() +} - if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { - config.Entrypoint = 
Entrypoint +func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, nil, err } - return config + + return w.Config, w.HostConfig(), nil } diff --git a/runconfig/config_test.go b/runconfig/config_test.go index accbd9107e316..87fc6c6aaca34 100644 --- a/runconfig/config_test.go +++ b/runconfig/config_test.go @@ -1,7 +1,9 @@ package runconfig import ( + "bytes" "fmt" + "io/ioutil" "strings" "testing" @@ -102,7 +104,7 @@ func TestParseRunVolumes(t *testing.T) { if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/var"]; !exists { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. 
Received %v", config.Volumes) } @@ -260,5 +262,39 @@ func TestMerge(t *testing.T) { t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) } } +} + +func TestDecodeContainerConfig(t *testing.T) { + fixtures := []struct { + file string + entrypoint *Entrypoint + }{ + {"fixtures/container_config_1_14.json", NewEntrypoint()}, + {"fixtures/container_config_1_17.json", NewEntrypoint("bash")}, + {"fixtures/container_config_1_19.json", NewEntrypoint("bash")}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != "ubuntu" { + t.Fatalf("Expected ubuntu image, found %s\n", c.Image) + } + if c.Entrypoint.Len() != f.entrypoint.Len() { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } } diff --git a/runconfig/exec.go b/runconfig/exec.go index 1bcdad1599b94..8fe05be1bb3d3 100644 --- a/runconfig/exec.go +++ b/runconfig/exec.go @@ -1,9 +1,6 @@ package runconfig import ( - "fmt" - - "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" ) @@ -19,34 +16,15 @@ type ExecConfig struct { Cmd []string } -func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) { - execConfig := &ExecConfig{ - // TODO(vishh): Expose 'User' once it is supported. - //User: job.Getenv("User"), - // TODO(vishh): Expose 'Privileged' once it is supported. 
- //Privileged: job.GetenvBool("Privileged"), - Tty: job.GetenvBool("Tty"), - AttachStdin: job.GetenvBool("AttachStdin"), - AttachStderr: job.GetenvBool("AttachStderr"), - AttachStdout: job.GetenvBool("AttachStdout"), - } - cmd := job.GetenvList("Cmd") - if len(cmd) == 0 { - return nil, fmt.Errorf("No exec command specified") - } - - execConfig.Cmd = cmd - - return execConfig, nil -} - func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - execCmd []string - container string + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string ) cmd.Require(flag.Min, 2) if err := cmd.ParseFlags(args, true); err != nil { @@ -57,10 +35,8 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { execCmd = parsedArgs[1:] execConfig := &ExecConfig{ - // TODO(vishh): Expose '-u' flag once it is supported. - User: "", - // TODO(vishh): Expose '-p' flag once it is supported. 
- Privileged: false, + User: *flUser, + Privileged: *flPrivileged, Tty: *flTty, Cmd: execCmd, Container: container, diff --git a/runconfig/fixtures/container_config_1_14.json b/runconfig/fixtures/container_config_1_14.json new file mode 100644 index 0000000000000..b08334c0950e4 --- /dev/null +++ b/runconfig/fixtures/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/runconfig/fixtures/container_config_1_17.json b/runconfig/fixtures/container_config_1_17.json new file mode 100644 index 0000000000000..60fc6e25e20d9 --- /dev/null +++ b/runconfig/fixtures/container_config_1_17.json @@ -0,0 +1,49 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": 
["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/runconfig/fixtures/container_config_1_19.json b/runconfig/fixtures/container_config_1_19.json new file mode 100644 index 0000000000000..9a3ce205b3694 --- /dev/null +++ b/runconfig/fixtures/container_config_1_19.json @@ -0,0 +1,57 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go index 84d636b5c4beb..171671b6efddd 100644 --- a/runconfig/hostconfig.go +++ b/runconfig/hostconfig.go @@ -1,14 +1,19 @@ package runconfig import ( + "encoding/json" + "io" "strings" - "github.com/docker/docker/engine" "github.com/docker/docker/nat" 
"github.com/docker/docker/pkg/ulimit" - "github.com/docker/docker/utils" ) +type KeyValuePair struct { + Key string + Value string +} + type NetworkMode string // IsPrivate indicates whether container use it's private network stack @@ -104,14 +109,65 @@ type LogConfig struct { Config map[string]string } +type LxcConfig struct { + values []KeyValuePair +} + +func (c *LxcConfig) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte{}, nil + } + return json.Marshal(c.Slice()) +} + +func (c *LxcConfig) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + var kv []KeyValuePair + if err := json.Unmarshal(b, &kv); err != nil { + var h map[string]string + if err := json.Unmarshal(b, &h); err != nil { + return err + } + for k, v := range h { + kv = append(kv, KeyValuePair{k, v}) + } + } + c.values = kv + + return nil +} + +func (c *LxcConfig) Len() int { + if c == nil { + return 0 + } + return len(c.values) +} + +func (c *LxcConfig) Slice() []KeyValuePair { + if c == nil { + return nil + } + return c.values +} + +func NewLxcConfig(values []KeyValuePair) *LxcConfig { + return &LxcConfig{values} +} + type HostConfig struct { Binds []string ContainerIDFile string - LxcConf []utils.KeyValuePair + LxcConf *LxcConfig Memory int64 // Memory limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap CpuShares int64 // CPU shares (relative weight vs. other containers) CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + CpuQuota int64 Privileged bool PortBindings nat.PortMap Links []string @@ -134,96 +190,55 @@ type HostConfig struct { CgroupParent string // Parent cgroup. 
} -// This is used by the create command when you want to set both the -// Config and the HostConfig in the same call -type ConfigAndHostConfig struct { - Config - HostConfig HostConfig +func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { + return &ContainerConfigWrapper{ + config, + &hostConfigWrapper{InnerHostConfig: hostConfig}, + } } -func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig { - return &ConfigAndHostConfig{ - *config, - *hostConfig, - } +type hostConfigWrapper struct { + InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + + *HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. } -func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { - if job.EnvExists("HostConfig") { - hostConfig := HostConfig{} - job.GetenvJson("HostConfig", &hostConfig) +func (w hostConfigWrapper) GetHostConfig() *HostConfig { + hc := w.HostConfig - // FIXME: These are for backward compatibility, if people use these - // options with `HostConfig`, we should still make them workable.
- if job.EnvExists("Memory") && hostConfig.Memory == 0 { - hostConfig.Memory = job.GetenvInt64("Memory") - } - if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 { - hostConfig.MemorySwap = job.GetenvInt64("MemorySwap") + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory } - if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 { - hostConfig.CpuShares = job.GetenvInt64("CpuShares") + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap } - if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { - hostConfig.CpusetCpus = job.Getenv("Cpuset") + if hc.CpuShares != 0 && w.InnerHostConfig.CpuShares == 0 { + w.InnerHostConfig.CpuShares = hc.CpuShares } - return &hostConfig + hc = w.InnerHostConfig } - hostConfig := &HostConfig{ - ContainerIDFile: job.Getenv("ContainerIDFile"), - Memory: job.GetenvInt64("Memory"), - MemorySwap: job.GetenvInt64("MemorySwap"), - CpuShares: job.GetenvInt64("CpuShares"), - CpusetCpus: job.Getenv("CpusetCpus"), - Privileged: job.GetenvBool("Privileged"), - PublishAllPorts: job.GetenvBool("PublishAllPorts"), - NetworkMode: NetworkMode(job.Getenv("NetworkMode")), - IpcMode: IpcMode(job.Getenv("IpcMode")), - PidMode: PidMode(job.Getenv("PidMode")), - ReadonlyRootfs: job.GetenvBool("ReadonlyRootfs"), - CgroupParent: job.Getenv("CgroupParent"), + if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset } - // FIXME: This is for backward compatibility, if people use `Cpuset` - // in json, make it workable, we will only pass hostConfig.CpusetCpus - // to execDriver. 
- if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { - hostConfig.CpusetCpus = job.Getenv("Cpuset") - } + return hc +} - job.GetenvJson("LxcConf", &hostConfig.LxcConf) - job.GetenvJson("PortBindings", &hostConfig.PortBindings) - job.GetenvJson("Devices", &hostConfig.Devices) - job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy) - job.GetenvJson("Ulimits", &hostConfig.Ulimits) - job.GetenvJson("LogConfig", &hostConfig.LogConfig) - hostConfig.SecurityOpt = job.GetenvList("SecurityOpt") - if Binds := job.GetenvList("Binds"); Binds != nil { - hostConfig.Binds = Binds - } - if Links := job.GetenvList("Links"); Links != nil { - hostConfig.Links = Links - } - if Dns := job.GetenvList("Dns"); Dns != nil { - hostConfig.Dns = Dns - } - if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { - hostConfig.DnsSearch = DnsSearch - } - if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil { - hostConfig.ExtraHosts = ExtraHosts - } - if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { - hostConfig.VolumesFrom = VolumesFrom - } - if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil { - hostConfig.CapAdd = CapAdd - } - if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil { - hostConfig.CapDrop = CapDrop +func DecodeHostConfig(src io.Reader) (*HostConfig, error) { + decoder := json.NewDecoder(src) + + var w hostConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err } - return hostConfig + hc := w.GetHostConfig() + + return hc, nil } diff --git a/runconfig/merge.go b/runconfig/merge.go index 68d3d6ee125e5..9c9a3b4367553 100644 --- a/runconfig/merge.go +++ b/runconfig/merge.go @@ -11,15 +11,6 @@ func Merge(userConf, imageConf *Config) error { if userConf.User == "" { userConf.User = imageConf.User } - if userConf.Memory == 0 { - userConf.Memory = imageConf.Memory - } - if userConf.MemorySwap == 0 { - userConf.MemorySwap = imageConf.MemorySwap - } - if userConf.CpuShares == 0 { - userConf.CpuShares = 
imageConf.CpuShares - } if len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { @@ -50,7 +41,7 @@ func Merge(userConf, imageConf *Config) error { } if len(imageConf.PortSpecs) > 0 { // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. - logrus.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) + logrus.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", ")) if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } @@ -94,8 +85,8 @@ func Merge(userConf, imageConf *Config) error { userConf.Labels = imageConf.Labels } - if len(userConf.Entrypoint) == 0 { - if len(userConf.Cmd) == 0 { + if userConf.Entrypoint.Len() == 0 { + if userConf.Cmd.Len() == 0 { userConf.Cmd = imageConf.Cmd } diff --git a/runconfig/parse.go b/runconfig/parse.go index 1fb36e4ace539..47feac866a636 100644 --- a/runconfig/parse.go +++ b/runconfig/parse.go @@ -12,7 +12,6 @@ import ( "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/pkg/units" - "github.com/docker/docker/utils" ) var ( @@ -65,6 +64,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCpuQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container") flMacAddress = 
cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") @@ -186,21 +187,22 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe var ( parsedArgs = cmd.Args() - runCmd []string - entrypoint []string + runCmd *Command + entrypoint *Entrypoint image = cmd.Arg(0) ) if len(parsedArgs) > 1 { - runCmd = parsedArgs[1:] + runCmd = NewCommand(parsedArgs[1:]...) } if *flEntrypoint != "" { - entrypoint = []string{*flEntrypoint} + entrypoint = NewEntrypoint(*flEntrypoint) } - lxcConf, err := parseKeyValueOpts(flLxcOpts) + lc, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } + lxcConf := NewLxcConfig(lc) var ( domainname string @@ -275,7 +277,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) } - restartPolicy, err := parseRestartPolicy(*flRestartPolicy) + restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) if err != nil { return nil, nil, cmd, err } @@ -289,10 +291,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe Tty: *flTty, NetworkDisabled: !*flNetwork, OpenStdin: *flStdin, - Memory: flMemory, // FIXME: for backward compatibility - MemorySwap: MemorySwap, // FIXME: for backward compatibility - CpuShares: *flCpuShares, // FIXME: for backward compatibility - Cpuset: *flCpusetCpus, // FIXME: for backward compatibility AttachStdin: attachStdin, AttachStdout: attachStdout, AttachStderr: attachStderr, @@ -314,6 +312,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe MemorySwap: MemorySwap, CpuShares: *flCpuShares, CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CpuQuota: *flCpuQuota, Privileged: *flPrivileged, PortBindings: portBindings, Links: flLinks.GetAll(), @@ -374,8 +374,8 @@ func convertKVStringsToMap(values []string) 
map[string]string { return result } -// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func parseRestartPolicy(policy string) (RestartPolicy, error) { +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (RestartPolicy, error) { p := RestartPolicy{} if policy == "" { @@ -430,14 +430,14 @@ func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { return out, nil } -func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { - out := make([]utils.KeyValuePair, opts.Len()) +func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { return nil, err } - out[i] = utils.KeyValuePair{Key: k, Value: v} + out[i] = KeyValuePair{Key: k, Value: v} } return out, nil } diff --git a/trust/service.go b/trust/service.go index 12b9645667025..6a804faf5297b 100644 --- a/trust/service.go +++ b/trust/service.go @@ -5,70 +5,49 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" "github.com/docker/libtrust" ) -func (t *TrustStore) Install(eng *engine.Engine) error { - for name, handler := range map[string]engine.Handler{ - "trust_key_check": t.CmdCheckKey, - "trust_update_base": t.CmdUpdateBase, - } { - if err := eng.Register(name, handler); err != nil { - return fmt.Errorf("Could not register %q: %v", name, err) - } - } - return nil -} +type NotVerifiedError string -func (t *TrustStore) CmdCheckKey(job *engine.Job) error { - if n := len(job.Args); n != 1 { - return fmt.Errorf("Usage: %s NAMESPACE", job.Name) - } - var ( - namespace = job.Args[0] - keyBytes = job.Getenv("PublicKey") - ) +func (e NotVerifiedError) Error() string { + return string(e) +} - if keyBytes == "" { - return fmt.Errorf("Missing PublicKey") +func (t *TrustStore) CheckKey(ns string, key []byte, perm 
uint16) (bool, error) { + if len(key) == 0 { + return false, fmt.Errorf("Missing PublicKey") } - pk, err := libtrust.UnmarshalPublicKeyJWK([]byte(keyBytes)) + pk, err := libtrust.UnmarshalPublicKeyJWK(key) if err != nil { - return fmt.Errorf("Error unmarshalling public key: %s", err) + return false, fmt.Errorf("Error unmarshalling public key: %v", err) } - permission := uint16(job.GetenvInt("Permission")) - if permission == 0 { - permission = 0x03 + if perm == 0 { + perm = 0x03 } t.RLock() defer t.RUnlock() if t.graph == nil { - job.Stdout.Write([]byte("no graph")) - return nil + return false, NotVerifiedError("no graph") } // Check if any expired grants - verified, err := t.graph.Verify(pk, namespace, permission) + verified, err := t.graph.Verify(pk, ns, perm) if err != nil { - return fmt.Errorf("Error verifying key to namespace: %s", namespace) + return false, fmt.Errorf("Error verifying key to namespace: %s", ns) } if !verified { - logrus.Debugf("Verification failed for %s using key %s", namespace, pk.KeyID()) - job.Stdout.Write([]byte("not verified")) - } else if t.expiration.Before(time.Now()) { - job.Stdout.Write([]byte("expired")) - } else { - job.Stdout.Write([]byte("verified")) + logrus.Debugf("Verification failed for %s using key %s", ns, pk.KeyID()) + return false, NotVerifiedError("not verified") } - - return nil + if t.expiration.Before(time.Now()) { + return false, NotVerifiedError("expired") + } + return true, nil } -func (t *TrustStore) CmdUpdateBase(job *engine.Job) error { +func (t *TrustStore) UpdateBase() { t.fetch() - - return nil } diff --git a/trust/trusts.go b/trust/trusts.go index c4a2f4158b21f..885127ee5d6f1 100644 --- a/trust/trusts.go +++ b/trust/trusts.go @@ -62,8 +62,7 @@ func NewTrustStore(path string) (*TrustStore, error) { baseEndpoints: endpoints, } - err = t.reload() - if err != nil { + if err := t.reload(); err != nil { return nil, err } @@ -170,8 +169,7 @@ func (t *TrustStore) fetch() { continue } // TODO check if value differs 
- err = ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600) - if err != nil { + if err := ioutil.WriteFile(path.Join(t.path, bg+".json"), b, 0600); err != nil { logrus.Infof("Error writing trust graph statement: %s", err) } fetchCount++ @@ -180,8 +178,7 @@ func (t *TrustStore) fetch() { if fetchCount > 0 { go func() { - err := t.reload() - if err != nil { + if err := t.reload(); err != nil { logrus.Infof("Reload of trust graph failed: %s", err) } }() diff --git a/utils/git.go b/utils/git.go new file mode 100644 index 0000000000000..18e002d184210 --- /dev/null +++ b/utils/git.go @@ -0,0 +1,47 @@ +package utils + +import ( + "fmt" + "io/ioutil" + "net/http" + "os/exec" + "strings" + + "github.com/docker/docker/pkg/urlutil" +) + +func GitClone(remoteURL string) (string, error) { + if !urlutil.IsGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + clone := cloneArgs(remoteURL, root) + + if output, err := exec.Command("git", clone...).CombinedOutput(); err != nil { + return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) + } + + return root, nil +} + +func cloneArgs(remoteURL, root string) []string { + args := []string{"clone", "--recursive"} + shallow := true + + if strings.HasPrefix(remoteURL, "http") { + res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) + if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + shallow = false + } + } + + if shallow { + args = append(args, "--depth", "1") + } + + return append(args, remoteURL, root) +} diff --git a/utils/git_test.go b/utils/git_test.go new file mode 100644 index 0000000000000..a82841ae1167e --- /dev/null +++ b/utils/git_test.go @@ -0,0 +1,56 @@ +package utils + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" +) + +func TestCloneArgsSmartHttp(t *testing.T) { + mux := 
http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query().Get("service") + w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) + }) + + args := cloneArgs(gitURL, "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} + +func TestCloneArgsDumbHttp(t *testing.T) { + mux := http.NewServeMux() + server := httptest.NewServer(mux) + serverURL, _ := url.Parse(server.URL) + + serverURL.Path = "/repo.git" + gitURL := serverURL.String() + + mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + }) + + args := cloneArgs(gitURL, "/tmp") + exp := []string{"clone", "--recursive", gitURL, "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} +func TestCloneArgsGit(t *testing.T) { + args := cloneArgs("git://github.com/docker/docker", "/tmp") + exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} + if !reflect.DeepEqual(args, exp) { + t.Fatalf("Expected %v, got %v", exp, args) + } +} diff --git a/utils/utils.go b/utils/utils.go index d0e76bf237aee..05dfb757a3bf9 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -2,9 +2,7 @@ package utils import ( "bufio" - "bytes" "crypto/sha1" - "crypto/sha256" "encoding/hex" "fmt" "io" @@ -13,47 +11,17 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "runtime" "strings" "sync" - "github.com/Sirupsen/logrus" "github.com/docker/docker/autogen/dockerversion" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - 
"github.com/docker/docker/pkg/stringutils" + "github.com/docker/docker/pkg/stringid" ) -type KeyValuePair struct { - Key string - Value string -} - -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -// Request a given URL and return an io.Reader -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -func Trunc(s string, maxlen int) string { - if len(s) <= maxlen { - return s - } - return s[:maxlen] -} - // Figure out the absolute path of our own binary (if it's still around). func SelfPath() string { path, err := exec.LookPath(os.Args[0]) @@ -127,12 +95,12 @@ func DockerInitPath(localCopy string) string { filepath.Join(filepath.Dir(selfPath), "dockerinit"), // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." - // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec + // https://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec "/usr/libexec/docker/dockerinit", "/usr/local/libexec/docker/dockerinit", // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." 
- // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA + // https://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA "/usr/lib/docker/dockerinit", "/usr/local/lib/docker/dockerinit", } @@ -155,84 +123,19 @@ func DockerInitPath(localCopy string) string { return "" } -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} - -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - err := fmt.Errorf("image ID '%s' is invalid", id) - return err - } - return nil -} - -// Code c/c from io.Copy() modified to handle escape sequence -func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - // char 16 is C-p - if nr == 1 && buf[0] == 16 { - nr, er = src.Read(buf) - // char 17 is C-q - if nr == 1 && buf[0] == 17 { - if err := src.Close(); err != nil { - return 0, err - } - return 0, nil - } - } - // ---- End of docker - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - +// FIXME: move to httputils? ioutils? 
type WriteFlusher struct { sync.Mutex w io.Writer flusher http.Flusher + flushed bool } func (wf *WriteFlusher) Write(b []byte) (n int, err error) { wf.Lock() defer wf.Unlock() n, err = wf.w.Write(b) + wf.flushed = true wf.flusher.Flush() return n, err } @@ -241,9 +144,16 @@ func (wf *WriteFlusher) Write(b []byte) (n int, err error) { func (wf *WriteFlusher) Flush() { wf.Lock() defer wf.Unlock() + wf.flushed = true wf.flusher.Flush() } +func (wf *WriteFlusher) Flushed() bool { + wf.Lock() + defer wf.Unlock() + return wf.flushed +} + func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { @@ -254,58 +164,6 @@ func NewWriteFlusher(w io.Writer) *WriteFlusher { return &WriteFlusher{w: w, flusher: flusher} } -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// An StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e *StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} - -func quote(word string, buf *bytes.Buffer) { - // Bail out early for "simple" strings - if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { - buf.WriteString(word) - return - } - - buf.WriteString("'") - - for i := 0; i < len(word); i++ { - b := word[i] - if b == '\'' { - // Replace literal ' with a close ', a \', and a open ' - buf.WriteString("'\\''") - } else { - buf.WriteByte(b) - } - } - - buf.WriteString("'") -} - -// Take a list of strings and escape them so they will be handled right -// when passed as arguments to an program via a shell -func ShellQuoteArguments(args []string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} - var globalTestID string // TestDirectory creates a new temporary directory and returns its path. @@ -313,7 +171,7 @@ var globalTestID string // new directory. func TestDirectory(templateDir string) (dir string, err error) { if globalTestID == "" { - globalTestID = stringutils.GenerateRandomString()[:4] + globalTestID = stringid.GenerateRandomID()[:4] } prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) if prefix == "" { @@ -343,26 +201,6 @@ func GetCallerName(depth int) string { return callerShortName } -func CopyFile(src, dst string) (int64, error) { - if src == dst { - return 0, nil - } - sf, err := os.Open(src) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(dst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - // ReplaceOrAppendValues returns the defaults with the overrides either // replaced by env key or appended to the list func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { @@ -401,37 +239,6 @@ func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { return defaults } -func DoesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} - 
-// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - // ValidateContextDirectory checks if all the contents of the directory // can be read and returns an error if some files can't be read // symlinks which point to non-existing files don't trigger an error @@ -476,15 +283,6 @@ func ValidateContextDirectory(srcPath string, excludes []string) error { }) } -func StringsContainsNoCase(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - // Reads a .dockerignore file and returns the list of file patterns // to ignore. Note this will trim whitespace from each line as well // as use GO's "clean" func to get the shortest/cleanest path for each. @@ -516,27 +314,6 @@ func ReadDockerIgnore(path string) ([]string, error) { return excludes, nil } -// Wrap a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". 
-// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} - // ImageReference combines `repo` and `ref` and returns a string representing // the combination. If `ref` is a digest (meaning it's of the form // :, the returned string is @. Otherwise, diff --git a/utils/utils_test.go b/utils/utils_test.go index 94303a0e96819..2863009423dd5 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -1,9 +1,10 @@ package utils import ( - "bytes" + "fmt" + "io/ioutil" "os" - "strings" + "path/filepath" "testing" ) @@ -25,104 +26,6 @@ func TestReplaceAndAppendEnvVars(t *testing.T) { } } -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t 
*testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." - totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} - func TestImageReference(t *testing.T) { tests := []struct { repo string @@ -152,3 +55,46 @@ func TestDigestReference(t *testing.T) { t.Errorf("Unexpected DigestReference=true for input %q", input) } } + +func TestReadDockerIgnore(t *testing.T) { + tmpDir, err := ioutil.TempDir("", "dockerignore-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + diName := filepath.Join(tmpDir, ".dockerignore") + + di, err := ReadDockerIgnore(diName) + if err != nil { + t.Fatalf("Expected not to have error, got %s", err) + } + + if diLen := len(di); diLen != 0 { + t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) + } + + content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") + err = ioutil.WriteFile(diName, []byte(content), 0777) + if err != nil { + t.Fatal(err) + } + + di, err = ReadDockerIgnore(diName) + if err != nil { + t.Fatal(err) + } + + if di[0] != "test1" { + t.Fatalf("First element is not test1") + } + if di[1] != "/test2" { + t.Fatalf("Second element is not /test2") + } + if di[2] != "/a/file/here" { + t.Fatalf("Third element is not /a/file/here") + } + if di[3] != "lastfile" { + t.Fatalf("Fourth element is not lastfile") + } +} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 0000000000000..5f091bbc927b6 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1459 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: RepositoryNameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + tagParameterDescriptor = ParameterDescriptor{ + Name: 
"tag", + Type: "string", + Format: TagNameRegexp.String(), + Required: true, + Description: `Tag of the target manifiest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: `A uuid identifying the upload. This field can accept almost anything.`, + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + unauthorizedResponse = ResponseDescriptor{ + Description: "The client does not have access to the repository.", + StatusCode: 
http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } + + unauthorizedResponsePush = ResponseDescriptor{ + Description: "The client does not have access to push to the repository.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: unauthorizedErrorsBody, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` + + unauthorizedErrorsBody = `{ + "errors:" [ + { + "code": "UNAUTHORIZED", + "message": "access to the requested resource is not authorized", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor + + // ErrorDescriptors provides a list of the error codes and their + // associated documentation and metadata. + ErrorDescriptors []ErrorDescriptor +}{ + RouteDescriptors: routeDescriptors, + ErrorDescriptors: errorDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. 
+ // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. + Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particalar request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. 
+ Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particalar response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status recieved by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. 
+ Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable decription of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCodes provides a list of status under which this error + // condition may arise. If it is empty, the error condition may be seen + // for any status code. + HTTPStatusCodes []int +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. 
Typically, this can be used for lightweight version checks and to validate registry authorization.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The client is not authorized to access the registry.", + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... 
+ ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameManifest, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/manifests/{reference:" + TagNameRegexp.String() + "|" + digest.DigestRegexp.String() + "}", + Entity: "Manifest", + Description: "Create, update and retrieve manifests.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest idenfied by `name` and `reference`. 
The contents can be used to identify and resolve resources required to run the specified image.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The name or reference was invalid.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have access to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Description: "The named manifest is not known to the registry.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: manifestBody, + }, + Successes: []ResponseDescriptor{ + { + Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The canonical location url of the uploaded manifest.", + Format: "", + }, + 
contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Manifest", + Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", + StatusCode: http.StatusBadRequest, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + ErrorCodeManifestInvalid, + ErrorCodeManifestUnverified, + ErrorCodeBlobUnknown, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Description: "The client does not have permission to push to the repository.", + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + }, + { + Name: "Missing Layer(s)", + Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "errors:" [{ + "code": "BLOB_UNKNOWN", + "message": "blob unknown to registry", + "detail": { + "digest": + } + }, + ... 
+ ] +}`, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + tagParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Tag", + Description: "The specified `name` or `tag` were invalid and the delete was unable to proceed.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeTagInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusUnauthorized, + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON error response body.", + Format: "", + }, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeUnauthorized, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Name: "Unknown Manifest", + Description: "The specified `name` or `tag` are unknown to the registry and the delete was unable to proceed. 
Clients can assume the manifest was already deleted if this response is returned.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeManifestUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlob, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", + Entity: "Blob", + Description: "Fetch the blob identified by `name` and `digest`. Used to fetch layers by tarsum digest.", + Methods: []MethodDescriptor{ + + { + Method: "GET", + Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", + Requests: []RequestDescriptor{ + { + Name: "Fetch Blob", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. 
If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in a single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been created in the registry and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + { + Name: "Initiate Resumable Blob Upload", + Description: "Initiate a resumable blob upload with an empty request body.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Format: "0-0", + Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponsePush, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + RepositoryNameRegexp.String() + "}/blobs/uploads/{uuid}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Description: "Upload a chunk of data to specified upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Required: true, + Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. 
Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Chunk Accepted", + Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. 
The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + }, + }, + }, + }, + { + Method: "PUT", + Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", + Requests: []RequestDescriptor{ + { + // TODO(stevvooe): Break this down into three separate requests: + // 1. Complete an upload where all data has already been sent. + // 2. Complete an upload where the entire body is in the PUT. + // 3. Complete an upload where the final, partial chunk is the body. + + Description: "Complete the upload, providing the _final_ chunk of data, if necessary. This method may take a body with all the data. If the `Content-Range` header is specified, it may include the final chunk. A request without a body will just complete the upload with previously uploaded content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header. May be omitted if no data is provided.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + { + Name: "Content-Length", + Type: "integer", + Format: "", + Description: "Length of the chunk being uploaded, corresponding the length of the request body.", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponsePush, + { + Description: "The upload is unknown to the registry. 
The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid. The contents of the `Range` header may be used to resolve the condition.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + }, + }, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. 
The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponse, + { + Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + }, + }, + }, + }, + }, + }, +} + +// ErrorDescriptors provides a list of HTTP API Error codes that may be +// encountered when interacting with the registry API. +var errorDescriptors = []ErrorDescriptor{ + { + Code: ErrorCodeUnknown, + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + }, + { + Code: ErrorCodeUnsupported, + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + }, + { + Code: ErrorCodeUnauthorized, + Value: "UNAUTHORIZED", + Message: "access to the requested resource is not authorized", + Description: `The access controller denied access for the operation on + a resource. Often this will be accompanied by a 401 Unauthorized + response status.`, + }, + { + Code: ErrorCodeDigestInvalid, + Value: "DIGEST_INVALID", + Message: "provided digest did not match uploaded content", + Description: `When a blob is uploaded, the registry will check that + the content matches the digest provided by the client. The error may + include a detail structure with the key "digest", including the + invalid digest string. 
This error may also be returned when a manifest + includes an invalid layer digest.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeSizeInvalid, + Value: "SIZE_INVALID", + Message: "provided length did not match content length", + Description: `When a layer is uploaded, the provided size will be + checked against the uploaded content. If they do not match, this error + will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeNameInvalid, + Value: "NAME_INVALID", + Message: "invalid repository name", + Description: `Invalid repository name encountered either during + manifest validation or any API operation.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeTagInvalid, + Value: "TAG_INVALID", + Message: "manifest tag did not match URI", + Description: `During a manifest upload, if the tag in the manifest + does not match the uri tag, this error will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + { + Code: ErrorCodeNameUnknown, + Value: "NAME_UNKNOWN", + Message: "repository name not known to registry", + Description: `This is returned if the name used during an operation is + unknown to the registry.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeManifestUnknown, + Value: "MANIFEST_UNKNOWN", + Message: "manifest unknown", + Description: `This error is returned when the manifest, identified by + name and tag is unknown to the repository.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeManifestInvalid, + Value: "MANIFEST_INVALID", + Message: "manifest invalid", + Description: `During upload, manifests undergo several checks ensuring + validity. If those checks fail, this error may be returned, unless a + more specific error is included. 
The detail will contain information + the failed validation.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeManifestUnverified, + Value: "MANIFEST_UNVERIFIED", + Message: "manifest failed signature verification", + Description: `During manifest upload, if the manifest fails signature + verification, this error will be returned.`, + HTTPStatusCodes: []int{http.StatusBadRequest}, + }, + { + Code: ErrorCodeBlobUnknown, + Value: "BLOB_UNKNOWN", + Message: "blob unknown to registry", + Description: `This error may be returned when a blob is unknown to the + registry in a specified repository. This can be returned with a + standard get or if a manifest references an unknown layer during + upload.`, + HTTPStatusCodes: []int{http.StatusBadRequest, http.StatusNotFound}, + }, + + { + Code: ErrorCodeBlobUploadUnknown, + Value: "BLOB_UPLOAD_UNKNOWN", + Message: "blob upload unknown to registry", + Description: `If a blob upload has been cancelled or was never + started, this error code may be returned.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, + { + Code: ErrorCodeBlobUploadInvalid, + Value: "BLOB_UPLOAD_INVALID", + Message: "blob upload invalid", + Description: `The blob upload encountered an error and can no + longer proceed.`, + HTTPStatusCodes: []int{http.StatusNotFound}, + }, +} + +var errorCodeToDescriptors map[ErrorCode]ErrorDescriptor +var idToDescriptors map[string]ErrorDescriptor +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + errorCodeToDescriptors = make(map[ErrorCode]ErrorDescriptor, len(errorDescriptors)) + idToDescriptors = make(map[string]ErrorDescriptor, len(errorDescriptors)) + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range errorDescriptors { + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + } + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] 
= descriptor + } +} diff --git a/registry/v2/doc.go b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go similarity index 64% rename from registry/v2/doc.go rename to vendor/src/github.com/docker/distribution/registry/api/v2/doc.go index 30fe2271a19b5..cde0119594dd0 100644 --- a/registry/v2/doc.go +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go @@ -5,9 +5,5 @@ // // Definitions here are considered to be locked down for the V2 registry api. // Any changes must be considered carefully and should not proceed without a -// change proposal. -// -// Currently, while the HTTP API definitions are considered stable, the Go API -// exports are considered unstable. Go API consumers should take care when -// relying on these definitions until this message is deleted. +// change proposal in docker core. package v2 diff --git a/registry/v2/errors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go similarity index 94% rename from registry/v2/errors.go rename to vendor/src/github.com/docker/distribution/registry/api/v2/errors.go index 8c85d3a97f164..cbae020efb1a6 100644 --- a/registry/v2/errors.go +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go @@ -13,6 +13,12 @@ const ( // ErrorCodeUnknown is a catch-all for errors not defined below. ErrorCodeUnknown ErrorCode = iota + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported + + // ErrorCodeUnauthorized is returned if a request is not authorized. + ErrorCodeUnauthorized + // ErrorCodeDigestInvalid is returned when uploading a blob if the // provided digest does not match the blob contents. ErrorCodeDigestInvalid @@ -51,6 +57,9 @@ const ( // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. ErrorCodeBlobUploadUnknown + + // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. 
+ ErrorCodeBlobUploadInvalid ) // ParseErrorCode attempts to parse the error code string, returning diff --git a/registry/v2/errors_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go similarity index 96% rename from registry/v2/errors_test.go rename to vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go index 4a80cdfe2d5d6..9cc831c440189 100644 --- a/registry/v2/errors_test.go +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/errors_test.go @@ -4,12 +4,14 @@ import ( "encoding/json" "reflect" "testing" + + "github.com/docker/distribution/digest" ) // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { - for _, desc := range ErrorDescriptors { + for _, desc := range errorDescriptors { if desc.Code.String() != desc.Value { t.Fatalf("error code string incorrect: %q != %q", desc.Code.String(), desc.Value) } @@ -59,7 +61,7 @@ func TestErrorsManagement(t *testing.T) { errs.Push(ErrorCodeDigestInvalid) errs.Push(ErrorCodeBlobUnknown, - map[string]string{"digest": "sometestblobsumdoesntmatter"}) + map[string]digest.Digest{"digest": "sometestblobsumdoesntmatter"}) p, err := json.Marshal(errs) diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/names.go b/vendor/src/github.com/docker/distribution/registry/api/v2/names.go new file mode 100644 index 0000000000000..e4a98861cbadd --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/names.go @@ -0,0 +1,100 @@ +package v2 + +import ( + "fmt" + "regexp" + "strings" +) + +// TODO(stevvooe): Move these definitions back to an exported package. While +// they are used with v2 definitions, their relevance expands beyond. +// "distribution/names" is a candidate package. 
+ +const ( + // RepositoryNameComponentMinLength is the minimum number of characters in a + // single repository name slash-delimited component + RepositoryNameComponentMinLength = 2 + + // RepositoryNameMinComponents is the minimum number of slash-delimited + // components that a repository name must have + RepositoryNameMinComponents = 1 + + // RepositoryNameTotalLengthMax is the maximum total number of characters in + // a repository name + RepositoryNameTotalLengthMax = 255 +) + +// RepositoryNameComponentRegexp restricts registry path component names to +// start with at least one letter or number, with following parts able to +// be separated by one period, dash or underscore. +var RepositoryNameComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[._-][a-z0-9]+)*`) + +// RepositoryNameComponentAnchoredRegexp is the version of +// RepositoryNameComponentRegexp which must completely match the content +var RepositoryNameComponentAnchoredRegexp = regexp.MustCompile(`^` + RepositoryNameComponentRegexp.String() + `$`) + +// RepositoryNameRegexp builds on RepositoryNameComponentRegexp to allow +// multiple path components, separated by a forward slash. +var RepositoryNameRegexp = regexp.MustCompile(`(?:` + RepositoryNameComponentRegexp.String() + `/)*` + RepositoryNameComponentRegexp.String()) + +// TagNameRegexp matches valid tag names. From docker/docker:graph/tags.go. +var TagNameRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) + +// TODO(stevvooe): Contribute these exports back to core, so they are shared. 
+ + var ( + // ErrRepositoryNameComponentShort is returned when a repository name + // contains a component which is shorter than + // RepositoryNameComponentMinLength + ErrRepositoryNameComponentShort = fmt.Errorf("repository name component must be %v or more characters", RepositoryNameComponentMinLength) + + // ErrRepositoryNameMissingComponents is returned when a repository name + // contains fewer than RepositoryNameMinComponents components + ErrRepositoryNameMissingComponents = fmt.Errorf("repository name must have at least %v components", RepositoryNameMinComponents) + + // ErrRepositoryNameLong is returned when a repository name is longer than + // RepositoryNameTotalLengthMax + ErrRepositoryNameLong = fmt.Errorf("repository name must not be more than %v characters", RepositoryNameTotalLengthMax) + + // ErrRepositoryNameComponentInvalid is returned when a repository name does + // not match RepositoryNameComponentRegexp + ErrRepositoryNameComponentInvalid = fmt.Errorf("repository name component must match %q", RepositoryNameComponentRegexp.String()) +) + +// ValidateRespositoryName ensures the repository name is valid for use in the +// registry. This function accepts a superset of what might be accepted by +// docker core or docker hub. If the name does not pass validation, an error, +// describing the conditions, is returned. +// +// Effectively, the name should comply with the following grammar: +// +// alpha-numeric := /[a-z0-9]+/ +// separator := /[._-]/ +// component := alpha-numeric [separator alpha-numeric]* +// namespace := component ['/' component]* +// +// The result of the production, known as the "namespace", should be limited +// to 255 characters. 
+func ValidateRespositoryName(name string) error { + if len(name) > RepositoryNameTotalLengthMax { + return ErrRepositoryNameLong + } + + components := strings.Split(name, "/") + + if len(components) < RepositoryNameMinComponents { + return ErrRepositoryNameMissingComponents + } + + for _, component := range components { + if len(component) < RepositoryNameComponentMinLength { + return ErrRepositoryNameComponentShort + } + + if !RepositoryNameComponentAnchoredRegexp.MatchString(component) { + return ErrRepositoryNameComponentInvalid + } + } + + return nil +} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go new file mode 100644 index 0000000000000..de6a168f0f650 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/names_test.go @@ -0,0 +1,100 @@ +package v2 + +import ( + "strings" + "testing" +) + +func TestRepositoryNameRegexp(t *testing.T) { + for _, testcase := range []struct { + input string + err error + }{ + { + input: "short", + }, + { + input: "simple/name", + }, + { + input: "library/ubuntu", + }, + { + input: "docker/stevvooe/app", + }, + { + input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb", + }, + { + input: "aa/aa/bb/bb/bb", + }, + { + input: "a/a/a/b/b", + err: ErrRepositoryNameComponentShort, + }, + { + input: "a/a/a/a/", + err: ErrRepositoryNameComponentShort, + }, + { + input: "foo.com/bar/baz", + }, + { + input: "blog.foo.com/bar/baz", + }, + { + input: "asdf", + }, + { + input: "asdf$$^/aa", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: "aa-a/aa", + }, + { + input: "aa/aa", + }, + { + input: "a-a/a-a", + }, + { + input: "a", + err: ErrRepositoryNameComponentShort, + }, + { + input: "a-/a/a/a", + err: ErrRepositoryNameComponentInvalid, + }, + { + input: strings.Repeat("a", 255), + }, + { + input: strings.Repeat("a", 256), + err: ErrRepositoryNameLong, + }, + } { + + failf := func(format string, v 
...interface{}) { + t.Logf(testcase.input+": "+format, v...) + t.Fail() + } + + if err := ValidateRespositoryName(testcase.input); err != testcase.err { + if testcase.err != nil { + if err != nil { + failf("unexpected error for invalid repository: got %v, expected %v", err, testcase.err) + } else { + failf("expected invalid repository: %v", testcase.err) + } + } else { + if err != nil { + // Wrong error returned. + failf("unexpected error validating repository name: %v, expected %v", err, testcase.err) + } else { + failf("unexpected error validating repository name: %v", err) + } + } + } + } +} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go new file mode 100644 index 0000000000000..69f9d9012a436 --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go @@ -0,0 +1,47 @@ +package v2 + +import "github.com/gorilla/mux" + +// The following are definitions of the name under which all V2 routes are +// registered. These symbols can be used to look up a route based on the name. +const ( + RouteNameBase = "base" + RouteNameManifest = "manifest" + RouteNameTags = "tags" + RouteNameBlob = "blob" + RouteNameBlobUpload = "blob-upload" + RouteNameBlobUploadChunk = "blob-upload-chunk" +) + +var allEndpoints = []string{ + RouteNameManifest, + RouteNameTags, + RouteNameBlob, + RouteNameBlobUpload, + RouteNameBlobUploadChunk, +} + +// Router builds a gorilla router with named routes for the various API +// methods. This can be used directly by both server implementations and +// clients. +func Router() *mux.Router { + return RouterWithPrefix("") +} + +// RouterWithPrefix builds a gorilla router with a configured prefix +// on all routes. 
+func RouterWithPrefix(prefix string) *mux.Router { + rootRouter := mux.NewRouter() + router := rootRouter + if prefix != "" { + router = router.PathPrefix(prefix).Subrouter() + } + + router.StrictSlash(true) + + for _, descriptor := range routeDescriptors { + router.Path(descriptor.Path).Name(descriptor.Name) + } + + return rootRouter +} diff --git a/registry/v2/routes_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go similarity index 50% rename from registry/v2/routes_test.go rename to vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go index 0191feed00189..afab71fce0d7a 100644 --- a/registry/v2/routes_test.go +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -2,19 +2,24 @@ package v2 import ( "encoding/json" + "fmt" + "math/rand" "net/http" "net/http/httptest" "reflect" + "strings" "testing" + "time" "github.com/gorilla/mux" ) type routeTestCase struct { - RequestURI string - Vars map[string]string - RouteName string - StatusCode int + RequestURI string + ExpectedURI string + Vars map[string]string + RouteName string + StatusCode int } // TestRouter registers a test handler with all the routes and ensures that @@ -24,28 +29,7 @@ type routeTestCase struct { // // This may go away as the application structure comes together. 
func TestRouter(t *testing.T) { - - router := Router() - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - testCase := routeTestCase{ - RequestURI: r.RequestURI, - Vars: mux.Vars(r), - RouteName: mux.CurrentRoute(r).GetName(), - } - - enc := json.NewEncoder(w) - - if err := enc.Encode(testCase); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - }) - - // Startup test server - server := httptest.NewServer(router) - - for _, testcase := range []routeTestCase{ + testCases := []routeTestCase{ { RouteName: RouteNameBase, RequestURI: "/v2/", @@ -67,6 +51,14 @@ func TestRouter(t *testing.T) { "reference": "tag", }, }, + { + RouteName: RouteNameManifest, + RequestURI: "/v2/foo/bar/manifests/sha256:abcdef01234567890", + Vars: map[string]string{ + "name": "foo/bar", + "reference": "sha256:abcdef01234567890", + }, + }, { RouteName: RouteNameTags, RequestURI: "/v2/foo/bar/tags/list", @@ -141,14 +133,98 @@ func TestRouter(t *testing.T) { "name": "foo/bar/manifests", }, }, + } + + checkTestRouter(t, testCases, "", true) + checkTestRouter(t, testCases, "/prefix/", true) +} + +func TestRouterWithPathTraversals(t *testing.T) { + testCases := []routeTestCase{ { - RouteName: RouteNameBlobUploadChunk, - RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", - StatusCode: http.StatusNotFound, + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/../../blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + ExpectedURI: "/blob/uploads/D95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, }, - } { + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/../bar/baz/tags/list", + ExpectedURI: "/v2/bar/baz/tags/list", + Vars: map[string]string{ + "name": "bar/baz", + }, + }, + } + checkTestRouter(t, testCases, "", false) +} + +func TestRouterWithBadCharacters(t *testing.T) { + if testing.Short() { + testCases := 
[]routeTestCase{ + { + RouteName: RouteNameBlobUploadChunk, + RequestURI: "/v2/foo/blob/uploads/不95306FA-FAD3-4E36-8D41-CF1C93EF8286", + StatusCode: http.StatusNotFound, + }, + { + // Testing for path traversal attack handling + RouteName: RouteNameTags, + RequestURI: "/v2/foo/不bar/tags/list", + StatusCode: http.StatusNotFound, + }, + } + checkTestRouter(t, testCases, "", true) + } else { + // in the long version we're going to fuzz the router + // with random UTF8 characters not in the 128 bit ASCII range. + // These are not valid characters for the router and we expect + // 404s on every test. + rand.Seed(time.Now().UTC().UnixNano()) + testCases := make([]routeTestCase, 1000) + for idx := range testCases { + testCases[idx] = routeTestCase{ + RouteName: RouteNameTags, + RequestURI: fmt.Sprintf("/v2/%v/%v/tags/list", randomString(10), randomString(10)), + StatusCode: http.StatusNotFound, + } + } + checkTestRouter(t, testCases, "", true) + } +} + +func checkTestRouter(t *testing.T, testCases []routeTestCase, prefix string, deeplyEqual bool) { + router := RouterWithPrefix(prefix) + + testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + testCase := routeTestCase{ + RequestURI: r.RequestURI, + Vars: mux.Vars(r), + RouteName: mux.CurrentRoute(r).GetName(), + } + + enc := json.NewEncoder(w) + + if err := enc.Encode(testCase); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) + + // Startup test server + server := httptest.NewServer(router) + + for _, testcase := range testCases { + testcase.RequestURI = strings.TrimSuffix(prefix, "/") + testcase.RequestURI // Register the endpoint - router.GetRoute(testcase.RouteName).Handler(testHandler) + route := router.GetRoute(testcase.RouteName) + if route == nil { + t.Fatalf("route for name %q not found", testcase.RouteName) + } + + route.Handler(testHandler) + u := server.URL + testcase.RequestURI resp, err := http.Get(u) @@ -161,6 +237,10 @@ func TestRouter(t 
*testing.T) { // Override default, zero-value testcase.StatusCode = http.StatusOK } + if testcase.ExpectedURI == "" { + // Override default, zero-value + testcase.ExpectedURI = testcase.RequestURI + } if resp.StatusCode != testcase.StatusCode { t.Fatalf("unexpected status for %s: %v %v", u, resp.Status, resp.StatusCode) @@ -180,13 +260,56 @@ func TestRouter(t *testing.T) { // Needs to be set out of band actualRouteInfo.StatusCode = resp.StatusCode + if actualRouteInfo.RequestURI != testcase.ExpectedURI { + t.Fatalf("URI %v incorrectly parsed, expected %v", actualRouteInfo.RequestURI, testcase.ExpectedURI) + } + if actualRouteInfo.RouteName != testcase.RouteName { t.Fatalf("incorrect route %q matched, expected %q", actualRouteInfo.RouteName, testcase.RouteName) } - if !reflect.DeepEqual(actualRouteInfo, testcase) { + // when testing deep equality, the actualRouteInfo has an empty ExpectedURI, we don't want + // that to make the comparison fail. We're otherwise done with the testcase so empty the + // testcase.ExpectedURI + testcase.ExpectedURI = "" + if deeplyEqual && !reflect.DeepEqual(actualRouteInfo, testcase) { t.Fatalf("actual does not equal expected: %#v != %#v", actualRouteInfo, testcase) } } } + +// -------------- START LICENSED CODE -------------- +// The following code is derivative of https://github.com/google/gofuzz +// gofuzz is licensed under the Apache License, Version 2.0, January 2004, +// a copy of which can be found in the LICENSE file at the root of this +// repository. + +// These functions allow us to generate strings containing only multibyte +// characters that are invalid in our URLs. They are used above for fuzzing +// to ensure we always get 404s on these invalid strings +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. 
+func (r *charRange) choose() rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +func randomString(length int) string { + runes := make([]rune, length) + for i := range runes { + runes[i] = unicodeRanges[rand.Intn(len(unicodeRanges))].choose() + } + return string(runes) +} + +// -------------- END LICENSED CODE -------------- diff --git a/registry/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go similarity index 80% rename from registry/v2/urls.go rename to vendor/src/github.com/docker/distribution/registry/api/v2/urls.go index 38fa98af01d33..4b42dd1624c83 100644 --- a/registry/v2/urls.go +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go @@ -3,7 +3,9 @@ package v2 import ( "net/http" "net/url" + "strings" + "github.com/docker/distribution/digest" "github.com/gorilla/mux" ) @@ -42,9 +44,40 @@ func NewURLBuilderFromString(root string) (*URLBuilder, error) { // NewURLBuilderFromRequest uses information from an *http.Request to // construct the root url. func NewURLBuilderFromRequest(r *http.Request) *URLBuilder { + var scheme string + + forwardedProto := r.Header.Get("X-Forwarded-Proto") + + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case r.TLS != nil: + scheme = "https" + case len(r.URL.Scheme) > 0: + scheme = r.URL.Scheme + default: + scheme = "http" + } + + host := r.Host + forwardedHost := r.Header.Get("X-Forwarded-Host") + if len(forwardedHost) > 0 { + host = forwardedHost + } + + basePath := routeDescriptorsMap[RouteNameBase].Path + + requestPath := r.URL.Path + index := strings.Index(requestPath, basePath) + u := &url.URL{ - Scheme: r.URL.Scheme, - Host: r.Host, + Scheme: scheme, + Host: host, + } + + if index > 0 { + // N.B. 
index+1 is important because we want to include the trailing / + u.Path = requestPath[0 : index+1] } return NewURLBuilder(u) @@ -74,7 +107,8 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { return tagsURL.String(), nil } -// BuildManifestURL constructs a url for the manifest identified by name and reference. +// BuildManifestURL constructs a url for the manifest identified by name and +// reference. The argument reference may be either a tag or digest. func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { route := ub.cloneRoute(RouteNameManifest) @@ -87,10 +121,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { } // BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst string) (string, error) { +func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name, "digest", dgst) + layerURL, err := route.URL("name", name, "digest", dgst.String()) if err != nil { return "", err } @@ -149,6 +183,10 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { return nil, err } + if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { + routeURL.Path = routeURL.Path[1:] + } + return cr.root.ResolveReference(routeURL), nil } diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go new file mode 100644 index 0000000000000..237d0f6159d3a --- /dev/null +++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -0,0 +1,225 @@ +package v2 + +import ( + "net/http" + "net/url" + "testing" +) + +type urlBuilderTestCase struct { + description string + expectedPath string + build func() (string, error) +} + +func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + 
return []urlBuilderTestCase{ + { + description: "test base url", + expectedPath: "/v2/", + build: urlBuilder.BuildBaseURL, + }, + { + description: "test tags url", + expectedPath: "/v2/foo/bar/tags/list", + build: func() (string, error) { + return urlBuilder.BuildTagsURL("foo/bar") + }, + }, + { + description: "test manifest url", + expectedPath: "/v2/foo/bar/manifests/tag", + build: func() (string, error) { + return urlBuilder.BuildManifestURL("foo/bar", "tag") + }, + }, + { + description: "build blob url", + expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + build: func() (string, error) { + return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + }, + }, + { + description: "build blob upload url", + expectedPath: "/v2/foo/bar/blobs/uploads/", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar") + }, + }, + { + description: "build blob upload url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + { + description: "build blob upload chunk url", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + }, + }, + { + description: "build blob upload chunk url with digest and size", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + build: func() (string, error) { + return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + "size": []string{"10000"}, + "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + }) + }, + }, + } +} + +// TestURLBuilder tests the various url building functions, ensuring they are +// returning the expected values. 
+func TestURLBuilder(t *testing.T) { + roots := []string{ + "http://example.com", + "https://example.com", + "http://localhost:5000", + "https://localhost:5443", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +func TestURLBuilderWithPrefix(t *testing.T) { + roots := []string{ + "http://example.com/prefix/", + "https://example.com/prefix/", + "http://localhost:5000/prefix/", + "https://localhost:5443/prefix/", + } + + for _, root := range roots { + urlBuilder, err := NewURLBuilderFromString(root) + if err != nil { + t.Fatalf("unexpected error creating urlbuilder: %v", err) + } + + for _, testCase := range makeURLBuilderTestCases(urlBuilder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := root[0:len(root)-1] + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +type builderFromRequestTestCase struct { + request *http.Request + base string +} + +func TestBuilderFromRequest(t *testing.T) { + u, err := url.Parse("http://example.com") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: 
"https://example.com", + }, + } + + for _, tr := range testRequests { + builder := NewURLBuilderFromRequest(tr.request) + + for _, testCase := range makeURLBuilderTestCases(builder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := tr.base + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} + +func TestBuilderFromRequestWithPrefix(t *testing.T) { + u, err := url.Parse("http://example.com/prefix/v2/") + if err != nil { + t.Fatal(err) + } + + forwardedProtoHeader := make(http.Header, 1) + forwardedProtoHeader.Set("X-Forwarded-Proto", "https") + + testRequests := []struct { + request *http.Request + base string + }{ + { + request: &http.Request{URL: u, Host: u.Host}, + base: "http://example.com/prefix/", + }, + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "https://example.com/prefix/", + }, + } + + for _, tr := range testRequests { + builder := NewURLBuilderFromRequest(tr.request) + + for _, testCase := range makeURLBuilderTestCases(builder) { + url, err := testCase.build() + if err != nil { + t.Fatalf("%s: error building url: %v", testCase.description, err) + } + + expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath + + if url != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + } + } + } +} diff --git a/vendor/src/github.com/go-check/check/.gitignore b/vendor/src/github.com/go-check/check/.gitignore new file mode 100644 index 0000000000000..191a5360b759f --- /dev/null +++ b/vendor/src/github.com/go-check/check/.gitignore @@ -0,0 +1,4 @@ +_* +*.swp +*.[568] +[568].out diff --git a/vendor/src/github.com/go-check/check/LICENSE b/vendor/src/github.com/go-check/check/LICENSE new file mode 100644 index 0000000000000..545cf2d3311b0 --- /dev/null +++ b/vendor/src/github.com/go-check/check/LICENSE @@ -0,0 +1,25 @@ 
+Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/go-check/check/README.md b/vendor/src/github.com/go-check/check/README.md new file mode 100644 index 0000000000000..0ca9e57260468 --- /dev/null +++ b/vendor/src/github.com/go-check/check/README.md @@ -0,0 +1,20 @@ +Instructions +============ + +Install the package with: + + go get gopkg.in/check.v1 + +Import it with: + + import "gopkg.in/check.v1" + +and use _check_ as the package name inside the code. 
+ +For more details, visit the project page: + +* http://labix.org/gocheck + +and the API documentation: + +* https://gopkg.in/check.v1 diff --git a/vendor/src/github.com/go-check/check/TODO b/vendor/src/github.com/go-check/check/TODO new file mode 100644 index 0000000000000..33498270eae3a --- /dev/null +++ b/vendor/src/github.com/go-check/check/TODO @@ -0,0 +1,2 @@ +- Assert(slice, Contains, item) +- Parallel test support diff --git a/vendor/src/github.com/go-check/check/benchmark.go b/vendor/src/github.com/go-check/check/benchmark.go new file mode 100644 index 0000000000000..48cb8c8114e7a --- /dev/null +++ b/vendor/src/github.com/go-check/check/benchmark.go @@ -0,0 +1,163 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package check + +import ( + "fmt" + "runtime" + "time" +) + +var memStats runtime.MemStats + +// testingB is a type passed to Benchmark functions to manage benchmark +// timing and to specify the number of iterations to run. +type timer struct { + start time.Time // Time test or benchmark started + duration time.Duration + N int + bytes int64 + timerOn bool + benchTime time.Duration + // The initial states of memStats.Mallocs and memStats.TotalAlloc. + startAllocs uint64 + startBytes uint64 + // The net total of this test after being run. + netAllocs uint64 + netBytes uint64 +} + +// StartTimer starts timing a test. This function is called automatically +// before a benchmark starts, but it can also used to resume timing after +// a call to StopTimer. +func (c *C) StartTimer() { + if !c.timerOn { + c.start = time.Now() + c.timerOn = true + + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } +} + +// StopTimer stops timing a test. This can be used to pause the timer +// while performing complex initialization that you don't +// want to measure. 
+func (c *C) StopTimer() { + if c.timerOn { + c.duration += time.Now().Sub(c.start) + c.timerOn = false + runtime.ReadMemStats(&memStats) + c.netAllocs += memStats.Mallocs - c.startAllocs + c.netBytes += memStats.TotalAlloc - c.startBytes + } +} + +// ResetTimer sets the elapsed benchmark time to zero. +// It does not affect whether the timer is running. +func (c *C) ResetTimer() { + if c.timerOn { + c.start = time.Now() + runtime.ReadMemStats(&memStats) + c.startAllocs = memStats.Mallocs + c.startBytes = memStats.TotalAlloc + } + c.duration = 0 + c.netAllocs = 0 + c.netBytes = 0 +} + +// SetBytes informs the number of bytes that the benchmark processes +// on each iteration. If this is called in a benchmark it will also +// report MB/s. +func (c *C) SetBytes(n int64) { + c.bytes = n +} + +func (c *C) nsPerOp() int64 { + if c.N <= 0 { + return 0 + } + return c.duration.Nanoseconds() / int64(c.N) +} + +func (c *C) mbPerSec() float64 { + if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 { + return 0 + } + return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds() +} + +func (c *C) timerString() string { + if c.N <= 0 { + return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9) + } + mbs := c.mbPerSec() + mb := "" + if mbs != 0 { + mb = fmt.Sprintf("\t%7.2f MB/s", mbs) + } + nsop := c.nsPerOp() + ns := fmt.Sprintf("%10d ns/op", nsop) + if c.N > 0 && nsop < 100 { + // The format specifiers here make sure that + // the ones digits line up for all three possible formats. 
+ if nsop < 10 { + ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } else { + ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N)) + } + } + memStats := "" + if c.benchMem { + allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N)) + allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N)) + memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs) + } + return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats) +} + +func min(x, y int) int { + if x > y { + return y + } + return x +} + +func max(x, y int) int { + if x < y { + return y + } + return x +} + +// roundDown10 rounds a number down to the nearest power of 10. +func roundDown10(n int) int { + var tens = 0 + // tens = floor(log_10(n)) + for n > 10 { + n = n / 10 + tens++ + } + // result = 10^tens + result := 1 + for i := 0; i < tens; i++ { + result *= 10 + } + return result +} + +// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX]. +func roundUp(n int) int { + base := roundDown10(n) + if n < (2 * base) { + return 2 * base + } + if n < (5 * base) { + return 5 * base + } + return 10 * base +} diff --git a/vendor/src/github.com/go-check/check/benchmark_test.go b/vendor/src/github.com/go-check/check/benchmark_test.go new file mode 100644 index 0000000000000..4dd827c160da4 --- /dev/null +++ b/vendor/src/github.com/go-check/check/benchmark_test.go @@ -0,0 +1,91 @@ +// These tests verify the test running logic. + +package check_test + +import ( + "time" + . 
"gopkg.in/check.v1" +) + +var benchmarkS = Suite(&BenchmarkS{}) + +type BenchmarkS struct{} + +func (s *BenchmarkS) TestCountSuite(c *C) { + suitesRun += 1 +} + +func (s *BenchmarkS) TestBasicTestTiming(c *C) { + helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond} + output := String{} + runConf := RunConf{Output: &output, Verbose: true} + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" + + "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestStreamTestTiming(c *C) { + helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond} + output := String{} + runConf := RunConf{Output: &output, Stream: true} + Run(&helper, &runConf) + + expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestBenchmark(c *C) { + helper := FixtureHelper{sleep: 100000} + output := String{} + runConf := RunConf{ + Output: &output, + Benchmark: true, + BenchmarkTime: 10000000, + Filter: "Benchmark1", + } + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Benchmark1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Benchmark1") + c.Check(helper.calls[6], Equals, "TearDownTest") + // ... and more. 
+ + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestBenchmarkBytes(c *C) { + helper := FixtureHelper{sleep: 100000} + output := String{} + runConf := RunConf{ + Output: &output, + Benchmark: true, + BenchmarkTime: 10000000, + Filter: "Benchmark2", + } + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n" + c.Assert(output.value, Matches, expected) +} + +func (s *BenchmarkS) TestBenchmarkMem(c *C) { + helper := FixtureHelper{sleep: 100000} + output := String{} + runConf := RunConf{ + Output: &output, + Benchmark: true, + BenchmarkMem: true, + BenchmarkTime: 10000000, + Filter: "Benchmark3", + } + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n" + c.Assert(output.value, Matches, expected) +} diff --git a/vendor/src/github.com/go-check/check/bootstrap_test.go b/vendor/src/github.com/go-check/check/bootstrap_test.go new file mode 100644 index 0000000000000..e55f327c7be03 --- /dev/null +++ b/vendor/src/github.com/go-check/check/bootstrap_test.go @@ -0,0 +1,82 @@ +// These initial tests are for bootstrapping. They verify that we can +// basically use the testing infrastructure itself to check if the test +// system is working. +// +// These tests use will break down the test runner badly in case of +// errors because if they simply fail, we can't be sure the developer +// will ever see anything (because failing means the failing system +// somehow isn't working! :-) +// +// Do not assume *any* internal functionality works as expected besides +// what's actually tested here. 
+ +package check_test + +import ( + "fmt" + "gopkg.in/check.v1" + "strings" +) + +type BootstrapS struct{} + +var boostrapS = check.Suite(&BootstrapS{}) + +func (s *BootstrapS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *BootstrapS) TestFailedAndFail(c *check.C) { + if c.Failed() { + critical("c.Failed() must be false first!") + } + c.Fail() + if !c.Failed() { + critical("c.Fail() didn't put the test in a failed state!") + } + c.Succeed() +} + +func (s *BootstrapS) TestFailedAndSucceed(c *check.C) { + c.Fail() + c.Succeed() + if c.Failed() { + critical("c.Succeed() didn't put the test back in a non-failed state") + } +} + +func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) { + c.Log("Hello there!") + log := c.GetTestLog() + if log != "Hello there!\n" { + critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log)) + } +} + +func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) { + c.Logf("Hello %v", "there!") + log := c.GetTestLog() + if log != "Hello there!\n" { + critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log)) + } +} + +func (s *BootstrapS) TestRunShowsErrors(c *check.C) { + output := String{} + check.Run(&FailHelper{}, &check.RunConf{Output: &output}) + if strings.Index(output.value, "Expected failure!") == -1 { + critical(fmt.Sprintf("RunWithWriter() output did not contain the "+ + "expected failure! Got: %#v", + output.value)) + } +} + +func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) { + output := String{} + check.Run(&SuccessHelper{}, &check.RunConf{Output: &output}) + if strings.Index(output.value, "Expected success!") != -1 { + critical(fmt.Sprintf("RunWithWriter() output contained a successful "+ + "test! 
Got: %#v", + output.value)) + } +} diff --git a/vendor/src/github.com/go-check/check/check.go b/vendor/src/github.com/go-check/check/check.go new file mode 100644 index 0000000000000..ca8c0f92deb46 --- /dev/null +++ b/vendor/src/github.com/go-check/check/check.go @@ -0,0 +1,945 @@ +// Package check is a rich testing extension for Go's testing package. +// +// For details about the project, see: +// +// http://labix.org/gocheck +// +package check + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// ----------------------------------------------------------------------- +// Internal type which deals with suite method calling. + +const ( + fixtureKd = iota + testKd +) + +type funcKind int + +const ( + succeededSt = iota + failedSt + skippedSt + panickedSt + fixturePanickedSt + missedSt +) + +type funcStatus int + +// A method value can't reach its own Method structure. +type methodType struct { + reflect.Value + Info reflect.Method +} + +func newMethod(receiver reflect.Value, i int) *methodType { + return &methodType{receiver.Method(i), receiver.Type().Method(i)} +} + +func (method *methodType) PC() uintptr { + return method.Info.Func.Pointer() +} + +func (method *methodType) suiteName() string { + t := method.Info.Type.In(0) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t.Name() +} + +func (method *methodType) String() string { + return method.suiteName() + "." 
+ method.Info.Name +} + +func (method *methodType) matches(re *regexp.Regexp) bool { + return (re.MatchString(method.Info.Name) || + re.MatchString(method.suiteName()) || + re.MatchString(method.String())) +} + +type C struct { + method *methodType + kind funcKind + testName string + status funcStatus + logb *logger + logw io.Writer + done chan *C + reason string + mustFail bool + tempDir *tempDir + benchMem bool + startTime time.Time + timer +} + +func (c *C) stopNow() { + runtime.Goexit() +} + +// logger is a concurrency safe byte.Buffer +type logger struct { + sync.Mutex + writer bytes.Buffer +} + +func (l *logger) Write(buf []byte) (int, error) { + l.Lock() + defer l.Unlock() + return l.writer.Write(buf) +} + +func (l *logger) WriteTo(w io.Writer) (int64, error) { + l.Lock() + defer l.Unlock() + return l.writer.WriteTo(w) +} + +func (l *logger) String() string { + l.Lock() + defer l.Unlock() + return l.writer.String() +} + +// ----------------------------------------------------------------------- +// Handling of temporary files and directories. + +type tempDir struct { + sync.Mutex + path string + counter int +} + +func (td *tempDir) newPath() string { + td.Lock() + defer td.Unlock() + if td.path == "" { + var err error + for i := 0; i != 100; i++ { + path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int()) + if err = os.Mkdir(path, 0700); err == nil { + td.path = path + break + } + } + if td.path == "" { + panic("Couldn't create temporary directory: " + err.Error()) + } + } + result := filepath.Join(td.path, strconv.Itoa(td.counter)) + td.counter += 1 + return result +} + +func (td *tempDir) removeAll() { + td.Lock() + defer td.Unlock() + if td.path != "" { + err := os.RemoveAll(td.path) + if err != nil { + fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error()) + } + } +} + +// Create a new temporary directory which is automatically removed after +// the suite finishes running. 
+func (c *C) MkDir() string { + path := c.tempDir.newPath() + if err := os.Mkdir(path, 0700); err != nil { + panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error())) + } + return path +} + +// ----------------------------------------------------------------------- +// Low-level logging functions. + +func (c *C) log(args ...interface{}) { + c.writeLog([]byte(fmt.Sprint(args...) + "\n")) +} + +func (c *C) logf(format string, args ...interface{}) { + c.writeLog([]byte(fmt.Sprintf(format+"\n", args...))) +} + +func (c *C) logNewLine() { + c.writeLog([]byte{'\n'}) +} + +func (c *C) writeLog(buf []byte) { + c.logb.Write(buf) + if c.logw != nil { + c.logw.Write(buf) + } +} + +func hasStringOrError(x interface{}) (ok bool) { + _, ok = x.(fmt.Stringer) + if ok { + return + } + _, ok = x.(error) + return +} + +func (c *C) logValue(label string, value interface{}) { + if label == "" { + if hasStringOrError(value) { + c.logf("... %#v (%q)", value, value) + } else { + c.logf("... %#v", value) + } + } else if value == nil { + c.logf("... %s = nil", label) + } else { + if hasStringOrError(value) { + fv := fmt.Sprintf("%#v", value) + qv := fmt.Sprintf("%q", value) + if fv != qv { + c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv) + return + } + } + if s, ok := value.(string); ok && isMultiLine(s) { + c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value)) + c.logMultiLine(s) + } else { + c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value) + } + } +} + +func (c *C) logMultiLine(s string) { + b := make([]byte, 0, len(s)*2) + i := 0 + n := len(s) + for i < n { + j := i + 1 + for j < n && s[j-1] != '\n' { + j++ + } + b = append(b, "... "...) + b = strconv.AppendQuote(b, s[i:j]) + if j < n { + b = append(b, " +"...) 
+ } + b = append(b, '\n') + i = j + } + c.writeLog(b) +} + +func isMultiLine(s string) bool { + for i := 0; i+1 < len(s); i++ { + if s[i] == '\n' { + return true + } + } + return false +} + +func (c *C) logString(issue string) { + c.log("... ", issue) +} + +func (c *C) logCaller(skip int) { + // This is a bit heavier than it ought to be. + skip += 1 // Our own frame. + pc, callerFile, callerLine, ok := runtime.Caller(skip) + if !ok { + return + } + var testFile string + var testLine int + testFunc := runtime.FuncForPC(c.method.PC()) + if runtime.FuncForPC(pc) != testFunc { + for { + skip += 1 + if pc, file, line, ok := runtime.Caller(skip); ok { + // Note that the test line may be different on + // distinct calls for the same test. Showing + // the "internal" line is helpful when debugging. + if runtime.FuncForPC(pc) == testFunc { + testFile, testLine = file, line + break + } + } else { + break + } + } + } + if testFile != "" && (testFile != callerFile || testLine != callerLine) { + c.logCode(testFile, testLine) + } + c.logCode(callerFile, callerLine) +} + +func (c *C) logCode(path string, line int) { + c.logf("%s:%d:", nicePath(path), line) + code, err := printLine(path, line) + if code == "" { + code = "..." // XXX Open the file and take the raw line. + if err != nil { + code += err.Error() + } + } + c.log(indent(code, " ")) +} + +var valueGo = filepath.Join("reflect", "value.go") +var asmGo = filepath.Join("runtime", "asm_") + +func (c *C) logPanic(skip int, value interface{}) { + skip++ // Our own frame. + initialSkip := skip + for ; ; skip++ { + if pc, file, line, ok := runtime.Caller(skip); ok { + if skip == initialSkip { + c.logf("... 
Panic: %s (PC=0x%X)\n", value, pc) + } + name := niceFuncName(pc) + path := nicePath(file) + if strings.Contains(path, "/gopkg.in/check.v") { + continue + } + if name == "Value.call" && strings.HasSuffix(path, valueGo) { + continue + } + if name == "call16" && strings.Contains(path, asmGo) { + continue + } + c.logf("%s:%d\n in %s", nicePath(file), line, name) + } else { + break + } + } +} + +func (c *C) logSoftPanic(issue string) { + c.log("... Panic: ", issue) +} + +func (c *C) logArgPanic(method *methodType, expectedType string) { + c.logf("... Panic: %s argument should be %s", + niceFuncName(method.PC()), expectedType) +} + +// ----------------------------------------------------------------------- +// Some simple formatting helpers. + +var initWD, initWDErr = os.Getwd() + +func init() { + if initWDErr == nil { + initWD = strings.Replace(initWD, "\\", "/", -1) + "/" + } +} + +func nicePath(path string) string { + if initWDErr == nil { + if strings.HasPrefix(path, initWD) { + return path[len(initWD):] + } + } + return path +} + +func niceFuncPath(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + filename, line := function.FileLine(pc) + return fmt.Sprintf("%s:%d", nicePath(filename), line) + } + return "" +} + +func niceFuncName(pc uintptr) string { + function := runtime.FuncForPC(pc) + if function != nil { + name := path.Base(function.Name()) + if i := strings.Index(name, "."); i > 0 { + name = name[i+1:] + } + if strings.HasPrefix(name, "(*") { + if i := strings.Index(name, ")"); i > 0 { + name = name[2:i] + name[i+1:] + } + } + if i := strings.LastIndex(name, ".*"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + if i := strings.LastIndex(name, "·"); i != -1 { + name = name[:i] + "." + name[i+2:] + } + return name + } + return "" +} + +// ----------------------------------------------------------------------- +// Result tracker to aggregate call results. 
+ +type Result struct { + Succeeded int + Failed int + Skipped int + Panicked int + FixturePanicked int + ExpectedFailures int + Missed int // Not even tried to run, related to a panic in the fixture. + RunError error // Houston, we've got a problem. + WorkDir string // If KeepWorkDir is true +} + +type resultTracker struct { + result Result + _lastWasProblem bool + _waiting int + _missed int + _expectChan chan *C + _doneChan chan *C + _stopChan chan bool +} + +func newResultTracker() *resultTracker { + return &resultTracker{_expectChan: make(chan *C), // Synchronous + _doneChan: make(chan *C, 32), // Asynchronous + _stopChan: make(chan bool)} // Synchronous +} + +func (tracker *resultTracker) start() { + go tracker._loopRoutine() +} + +func (tracker *resultTracker) waitAndStop() { + <-tracker._stopChan +} + +func (tracker *resultTracker) expectCall(c *C) { + tracker._expectChan <- c +} + +func (tracker *resultTracker) callDone(c *C) { + tracker._doneChan <- c +} + +func (tracker *resultTracker) _loopRoutine() { + for { + var c *C + if tracker._waiting > 0 { + // Calls still running. Can't stop. + select { + // XXX Reindent this (not now to make diff clear) + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + tracker._waiting -= 1 + switch c.status { + case succeededSt: + if c.kind == testKd { + if c.mustFail { + tracker.result.ExpectedFailures++ + } else { + tracker.result.Succeeded++ + } + } + case failedSt: + tracker.result.Failed++ + case panickedSt: + if c.kind == fixtureKd { + tracker.result.FixturePanicked++ + } else { + tracker.result.Panicked++ + } + case fixturePanickedSt: + // Track it as missed, since the panic + // was on the fixture, not on the test. + tracker.result.Missed++ + case missedSt: + tracker.result.Missed++ + case skippedSt: + if c.kind == testKd { + tracker.result.Skipped++ + } + } + } + } else { + // No calls. Can stop, but no done calls here. 
+ select { + case tracker._stopChan <- true: + return + case c = <-tracker._expectChan: + tracker._waiting += 1 + case c = <-tracker._doneChan: + panic("Tracker got an unexpected done call.") + } + } + } +} + +// ----------------------------------------------------------------------- +// The underlying suite runner. + +type suiteRunner struct { + suite interface{} + setUpSuite, tearDownSuite *methodType + setUpTest, tearDownTest *methodType + tests []*methodType + tracker *resultTracker + tempDir *tempDir + keepDir bool + output *outputWriter + reportedProblemLast bool + benchTime time.Duration + benchMem bool +} + +type RunConf struct { + Output io.Writer + Stream bool + Verbose bool + Filter string + Benchmark bool + BenchmarkTime time.Duration // Defaults to 1 second + BenchmarkMem bool + KeepWorkDir bool +} + +// Create a new suiteRunner able to run all methods in the given suite. +func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner { + var conf RunConf + if runConf != nil { + conf = *runConf + } + if conf.Output == nil { + conf.Output = os.Stdout + } + if conf.Benchmark { + conf.Verbose = true + } + + suiteType := reflect.TypeOf(suite) + suiteNumMethods := suiteType.NumMethod() + suiteValue := reflect.ValueOf(suite) + + runner := &suiteRunner{ + suite: suite, + output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose), + tracker: newResultTracker(), + benchTime: conf.BenchmarkTime, + benchMem: conf.BenchmarkMem, + tempDir: &tempDir{}, + keepDir: conf.KeepWorkDir, + tests: make([]*methodType, 0, suiteNumMethods), + } + if runner.benchTime == 0 { + runner.benchTime = 1 * time.Second + } + + var filterRegexp *regexp.Regexp + if conf.Filter != "" { + if regexp, err := regexp.Compile(conf.Filter); err != nil { + msg := "Bad filter expression: " + err.Error() + runner.tracker.result.RunError = errors.New(msg) + return runner + } else { + filterRegexp = regexp + } + } + + for i := 0; i != suiteNumMethods; i++ { + method := 
newMethod(suiteValue, i) + switch method.Info.Name { + case "SetUpSuite": + runner.setUpSuite = method + case "TearDownSuite": + runner.tearDownSuite = method + case "SetUpTest": + runner.setUpTest = method + case "TearDownTest": + runner.tearDownTest = method + default: + prefix := "Test" + if conf.Benchmark { + prefix = "Benchmark" + } + if !strings.HasPrefix(method.Info.Name, prefix) { + continue + } + if filterRegexp == nil || method.matches(filterRegexp) { + runner.tests = append(runner.tests, method) + } + } + } + return runner +} + +// Run all methods in the given suite. +func (runner *suiteRunner) run() *Result { + if runner.tracker.result.RunError == nil && len(runner.tests) > 0 { + runner.tracker.start() + if runner.checkFixtureArgs() { + c := runner.runFixture(runner.setUpSuite, "", nil) + if c == nil || c.status == succeededSt { + for i := 0; i != len(runner.tests); i++ { + c := runner.runTest(runner.tests[i]) + if c.status == fixturePanickedSt { + runner.skipTests(missedSt, runner.tests[i+1:]) + break + } + } + } else if c != nil && c.status == skippedSt { + runner.skipTests(skippedSt, runner.tests) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.runFixture(runner.tearDownSuite, "", nil) + } else { + runner.skipTests(missedSt, runner.tests) + } + runner.tracker.waitAndStop() + if runner.keepDir { + runner.tracker.result.WorkDir = runner.tempDir.path + } else { + runner.tempDir.removeAll() + } + } + return &runner.tracker.result +} + +// Create a call object with the given suite method, and fork a +// goroutine with the provided dispatcher for running it. 
+func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + var logw io.Writer + if runner.output.Stream { + logw = runner.output + } + if logb == nil { + logb = new(logger) + } + c := &C{ + method: method, + kind: kind, + testName: testName, + logb: logb, + logw: logw, + tempDir: runner.tempDir, + done: make(chan *C, 1), + timer: timer{benchTime: runner.benchTime}, + startTime: time.Now(), + benchMem: runner.benchMem, + } + runner.tracker.expectCall(c) + go (func() { + runner.reportCallStarted(c) + defer runner.callDone(c) + dispatcher(c) + })() + return c +} + +// Same as forkCall(), but wait for call to finish before returning. +func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C { + c := runner.forkCall(method, kind, testName, logb, dispatcher) + <-c.done + return c +} + +// Handle a finished call. If there were any panics, update the call status +// accordingly. Then, mark the call as done and report to the tracker. +func (runner *suiteRunner) callDone(c *C) { + value := recover() + if value != nil { + switch v := value.(type) { + case *fixturePanic: + if v.status == skippedSt { + c.status = skippedSt + } else { + c.logSoftPanic("Fixture has panicked (see related PANIC)") + c.status = fixturePanickedSt + } + default: + c.logPanic(1, value) + c.status = panickedSt + } + } + if c.mustFail { + switch c.status { + case failedSt: + c.status = succeededSt + case succeededSt: + c.status = failedSt + c.logString("Error: Test succeeded, but was expected to fail") + c.logString("Reason: " + c.reason) + } + } + + runner.reportCallDone(c) + c.done <- c +} + +// Runs a fixture call synchronously. The fixture will still be run in a +// goroutine like all suite methods, but this method will not return +// while the fixture goroutine is not done, because the fixture must be +// run in a desired order. 
+func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C { + if method != nil { + c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) { + c.ResetTimer() + c.StartTimer() + defer c.StopTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + }) + return c + } + return nil +} + +// Run the fixture method with runFixture(), but panic with a fixturePanic{} +// in case the fixture method panics. This makes it easier to track the +// fixture panic together with other call panics within forkTest(). +func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C { + if skipped != nil && *skipped { + return nil + } + c := runner.runFixture(method, testName, logb) + if c != nil && c.status != succeededSt { + if skipped != nil { + *skipped = c.status == skippedSt + } + panic(&fixturePanic{c.status, method}) + } + return c +} + +type fixturePanic struct { + status funcStatus + method *methodType +} + +// Run the suite test method, together with the test-specific fixture, +// asynchronously. +func (runner *suiteRunner) forkTest(method *methodType) *C { + testName := method.String() + return runner.forkCall(method, testKd, testName, nil, func(c *C) { + var skipped bool + defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped) + defer c.StopTimer() + benchN := 1 + for { + runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped) + mt := c.method.Type() + if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) { + // Rather than a plain panic, provide a more helpful message when + // the argument type is incorrect. 
+ c.status = panickedSt + c.logArgPanic(c.method, "*check.C") + return + } + if strings.HasPrefix(c.method.Info.Name, "Test") { + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + return + } + if !strings.HasPrefix(c.method.Info.Name, "Benchmark") { + panic("unexpected method prefix: " + c.method.Info.Name) + } + + runtime.GC() + c.N = benchN + c.ResetTimer() + c.StartTimer() + c.method.Call([]reflect.Value{reflect.ValueOf(c)}) + c.StopTimer() + if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 { + return + } + perOpN := int(1e9) + if c.nsPerOp() != 0 { + perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp()) + } + + // Logic taken from the stock testing package: + // - Run more iterations than we think we'll need for a second (1.5x). + // - Don't grow too fast in case we had timing errors previously. + // - Be sure to run at least one more than last time. + benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1) + benchN = roundUp(benchN) + + skipped = true // Don't run the deferred one if this panics. + runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil) + skipped = false + } + }) +} + +// Same as forkTest(), but wait for the test to finish before returning. +func (runner *suiteRunner) runTest(method *methodType) *C { + c := runner.forkTest(method) + <-c.done + return c +} + +// Helper to mark tests as skipped or missed. A bit heavy for what +// it does, but it enables homogeneous handling of tracking, including +// nice verbose output. +func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) { + for _, method := range methods { + runner.runFunc(method, testKd, "", nil, func(c *C) { + c.status = status + }) + } +} + +// Verify if the fixture arguments are *check.C. In case of errors, +// log the error as a panic in the fixture method call, and return false. 
+func (runner *suiteRunner) checkFixtureArgs() bool { + succeeded := true + argType := reflect.TypeOf(&C{}) + for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} { + if method != nil { + mt := method.Type() + if mt.NumIn() != 1 || mt.In(0) != argType { + succeeded = false + runner.runFunc(method, fixtureKd, "", nil, func(c *C) { + c.logArgPanic(method, "*check.C") + c.status = panickedSt + }) + } + } + } + return succeeded +} + +func (runner *suiteRunner) reportCallStarted(c *C) { + runner.output.WriteCallStarted("START", c) +} + +func (runner *suiteRunner) reportCallDone(c *C) { + runner.tracker.callDone(c) + switch c.status { + case succeededSt: + if c.mustFail { + runner.output.WriteCallSuccess("FAIL EXPECTED", c) + } else { + runner.output.WriteCallSuccess("PASS", c) + } + case skippedSt: + runner.output.WriteCallSuccess("SKIP", c) + case failedSt: + runner.output.WriteCallProblem("FAIL", c) + case panickedSt: + runner.output.WriteCallProblem("PANIC", c) + case fixturePanickedSt: + // That's a testKd call reporting that its fixture + // has panicked. The fixture call which caused the + // panic itself was tracked above. We'll report to + // aid debugging. + runner.output.WriteCallProblem("PANIC", c) + case missedSt: + runner.output.WriteCallSuccess("MISS", c) + } +} + +// ----------------------------------------------------------------------- +// Output writer manages atomic output writing according to settings. 
+ +type outputWriter struct { + m sync.Mutex + writer io.Writer + wroteCallProblemLast bool + Stream bool + Verbose bool +} + +func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter { + return &outputWriter{writer: writer, Stream: stream, Verbose: verbose} +} + +func (ow *outputWriter) Write(content []byte) (n int, err error) { + ow.m.Lock() + n, err = ow.writer.Write(content) + ow.m.Unlock() + return +} + +func (ow *outputWriter) WriteCallStarted(label string, c *C) { + if ow.Stream { + header := renderCallHeader(label, c, "", "\n") + ow.m.Lock() + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func (ow *outputWriter) WriteCallProblem(label string, c *C) { + var prefix string + if !ow.Stream { + prefix = "\n-----------------------------------" + + "-----------------------------------\n" + } + header := renderCallHeader(label, c, prefix, "\n\n") + ow.m.Lock() + ow.wroteCallProblemLast = true + ow.writer.Write([]byte(header)) + if !ow.Stream { + c.logb.WriteTo(ow.writer) + } + ow.m.Unlock() +} + +func (ow *outputWriter) WriteCallSuccess(label string, c *C) { + if ow.Stream || (ow.Verbose && c.kind == testKd) { + // TODO Use a buffer here. + var suffix string + if c.reason != "" { + suffix = " (" + c.reason + ")" + } + if c.status == succeededSt { + suffix += "\t" + c.timerString() + } + suffix += "\n" + if ow.Stream { + suffix += "\n" + } + header := renderCallHeader(label, c, "", suffix) + ow.m.Lock() + // Resist temptation of using line as prefix above due to race. 
+ if !ow.Stream && ow.wroteCallProblemLast { + header = "\n-----------------------------------" + + "-----------------------------------\n" + + header + } + ow.wroteCallProblemLast = false + ow.writer.Write([]byte(header)) + ow.m.Unlock() + } +} + +func renderCallHeader(label string, c *C, prefix, suffix string) string { + pc := c.method.PC() + return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc), + niceFuncName(pc), suffix) +} diff --git a/vendor/src/github.com/go-check/check/check_test.go b/vendor/src/github.com/go-check/check/check_test.go new file mode 100644 index 0000000000000..871b325276a21 --- /dev/null +++ b/vendor/src/github.com/go-check/check/check_test.go @@ -0,0 +1,207 @@ +// This file contains just a few generic helpers which are used by the +// other test files. + +package check_test + +import ( + "flag" + "fmt" + "os" + "regexp" + "runtime" + "testing" + "time" + + "gopkg.in/check.v1" +) + +// We count the number of suites run at least to get a vague hint that the +// test suite is behaving as it should. Otherwise a bug introduced at the +// very core of the system could go unperceived. +const suitesRunExpected = 8 + +var suitesRun int = 0 + +func Test(t *testing.T) { + check.TestingT(t) + if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" { + critical(fmt.Sprintf("Expected %d suites to run rather than %d", + suitesRunExpected, suitesRun)) + } +} + +// ----------------------------------------------------------------------- +// Helper functions. + +// Break down badly. This is used in test cases which can't yet assume +// that the fundamental bits are working. +func critical(error string) { + fmt.Fprintln(os.Stderr, "CRITICAL: "+error) + os.Exit(1) +} + +// Return the file line where it's called. 
+func getMyLine() int { + if _, _, line, ok := runtime.Caller(1); ok { + return line + } + return -1 +} + +// ----------------------------------------------------------------------- +// Helper type implementing a basic io.Writer for testing output. + +// Type implementing the io.Writer interface for analyzing output. +type String struct { + value string +} + +// The only function required by the io.Writer interface. Will append +// written data to the String.value string. +func (s *String) Write(p []byte) (n int, err error) { + s.value += string(p) + return len(p), nil +} + +// Trivial wrapper to test errors happening on a different file +// than the test itself. +func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) { + return c.Check(obtained, check.Equals, expected), getMyLine() +} + +// ----------------------------------------------------------------------- +// Helper suite for testing basic fail behavior. + +type FailHelper struct { + testLine int +} + +func (s *FailHelper) TestLogAndFail(c *check.C) { + s.testLine = getMyLine() - 1 + c.Log("Expected failure!") + c.Fail() +} + +// ----------------------------------------------------------------------- +// Helper suite for testing basic success behavior. + +type SuccessHelper struct{} + +func (s *SuccessHelper) TestLogAndSucceed(c *check.C) { + c.Log("Expected success!") +} + +// ----------------------------------------------------------------------- +// Helper suite for testing ordering and behavior of fixture. 
+ +type FixtureHelper struct { + calls []string + panicOn string + skip bool + skipOnN int + sleepOn string + sleep time.Duration + bytes int64 +} + +func (s *FixtureHelper) trace(name string, c *check.C) { + s.calls = append(s.calls, name) + if name == s.panicOn { + panic(name) + } + if s.sleep > 0 && s.sleepOn == name { + time.Sleep(s.sleep) + } + if s.skip && s.skipOnN == len(s.calls)-1 { + c.Skip("skipOnN == n") + } +} + +func (s *FixtureHelper) SetUpSuite(c *check.C) { + s.trace("SetUpSuite", c) +} + +func (s *FixtureHelper) TearDownSuite(c *check.C) { + s.trace("TearDownSuite", c) +} + +func (s *FixtureHelper) SetUpTest(c *check.C) { + s.trace("SetUpTest", c) +} + +func (s *FixtureHelper) TearDownTest(c *check.C) { + s.trace("TearDownTest", c) +} + +func (s *FixtureHelper) Test1(c *check.C) { + s.trace("Test1", c) +} + +func (s *FixtureHelper) Test2(c *check.C) { + s.trace("Test2", c) +} + +func (s *FixtureHelper) Benchmark1(c *check.C) { + s.trace("Benchmark1", c) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + } +} + +func (s *FixtureHelper) Benchmark2(c *check.C) { + s.trace("Benchmark2", c) + c.SetBytes(1024) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + } +} + +func (s *FixtureHelper) Benchmark3(c *check.C) { + var x []int64 + s.trace("Benchmark3", c) + for i := 0; i < c.N; i++ { + time.Sleep(s.sleep) + x = make([]int64, 5) + _ = x + } +} + +// ----------------------------------------------------------------------- +// Helper which checks the state of the test and ensures that it matches +// the given expectations. Depends on c.Errorf() working, so shouldn't +// be used to test this one function. + +type expectedState struct { + name string + result interface{} + failed bool + log string +} + +// Verify the state of the test. Note that since this also verifies if +// the test is supposed to be in a failed state, no other checks should +// be done in addition to what is being tested. 
+func checkState(c *check.C, result interface{}, expected *expectedState) { + failed := c.Failed() + c.Succeed() + log := c.GetTestLog() + matched, matchError := regexp.MatchString("^"+expected.log+"$", log) + if matchError != nil { + c.Errorf("Error in matching expression used in testing %s", + expected.name) + } else if !matched { + c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------", + expected.name, log, expected.log) + } + if result != expected.result { + c.Errorf("%s returned %#v rather than %#v", + expected.name, result, expected.result) + } + if failed != expected.failed { + if failed { + c.Errorf("%s has failed when it shouldn't", expected.name) + } else { + c.Errorf("%s has not failed when it should", expected.name) + } + } +} diff --git a/vendor/src/github.com/go-check/check/checkers.go b/vendor/src/github.com/go-check/check/checkers.go new file mode 100644 index 0000000000000..bac338729c887 --- /dev/null +++ b/vendor/src/github.com/go-check/check/checkers.go @@ -0,0 +1,458 @@ +package check + +import ( + "fmt" + "reflect" + "regexp" +) + +// ----------------------------------------------------------------------- +// CommentInterface and Commentf helper, to attach extra information to checks. + +type comment struct { + format string + args []interface{} +} + +// Commentf returns an infomational value to use with Assert or Check calls. +// If the checker test fails, the provided arguments will be passed to +// fmt.Sprintf, and will be presented next to the logged failure. 
+// +// For example: +// +// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i)) +// +// Note that if the comment is constant, a better option is to +// simply use a normal comment right above or next to the line, as +// it will also get printed with any errors: +// +// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123) +// +func Commentf(format string, args ...interface{}) CommentInterface { + return &comment{format, args} +} + +// CommentInterface must be implemented by types that attach extra +// information to failed checks. See the Commentf function for details. +type CommentInterface interface { + CheckCommentString() string +} + +func (c *comment) CheckCommentString() string { + return fmt.Sprintf(c.format, c.args...) +} + +// ----------------------------------------------------------------------- +// The Checker interface. + +// The Checker interface must be provided by checkers used with +// the Assert and Check verification methods. +type Checker interface { + Info() *CheckerInfo + Check(params []interface{}, names []string) (result bool, error string) +} + +// See the Checker interface. +type CheckerInfo struct { + Name string + Params []string +} + +func (info *CheckerInfo) Info() *CheckerInfo { + return info +} + +// ----------------------------------------------------------------------- +// Not checker logic inverter. + +// The Not checker inverts the logic of the provided checker. The +// resulting checker will succeed where the original one failed, and +// vice-versa. 
+// +// For example: +// +// c.Assert(a, Not(Equals), b) +// +func Not(checker Checker) Checker { + return ¬Checker{checker} +} + +type notChecker struct { + sub Checker +} + +func (checker *notChecker) Info() *CheckerInfo { + info := *checker.sub.Info() + info.Name = "Not(" + info.Name + ")" + return &info +} + +func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) { + result, error = checker.sub.Check(params, names) + result = !result + return +} + +// ----------------------------------------------------------------------- +// IsNil checker. + +type isNilChecker struct { + *CheckerInfo +} + +// The IsNil checker tests whether the obtained value is nil. +// +// For example: +// +// c.Assert(err, IsNil) +// +var IsNil Checker = &isNilChecker{ + &CheckerInfo{Name: "IsNil", Params: []string{"value"}}, +} + +func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return isNil(params[0]), "" +} + +func isNil(obtained interface{}) (result bool) { + if obtained == nil { + result = true + } else { + switch v := reflect.ValueOf(obtained); v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + } + return +} + +// ----------------------------------------------------------------------- +// NotNil checker. Alias for Not(IsNil), since it's so common. + +type notNilChecker struct { + *CheckerInfo +} + +// The NotNil checker verifies that the obtained value is not nil. +// +// For example: +// +// c.Assert(iface, NotNil) +// +// This is an alias for Not(IsNil), made available since it's a +// fairly common check. 
+// +var NotNil Checker = ¬NilChecker{ + &CheckerInfo{Name: "NotNil", Params: []string{"value"}}, +} + +func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) { + return !isNil(params[0]), "" +} + +// ----------------------------------------------------------------------- +// Equals checker. + +type equalsChecker struct { + *CheckerInfo +} + +// The Equals checker verifies that the obtained value is equal to +// the expected value, according to usual Go semantics for ==. +// +// For example: +// +// c.Assert(value, Equals, 42) +// +var Equals Checker = &equalsChecker{ + &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}}, +} + +func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) { + defer func() { + if v := recover(); v != nil { + result = false + error = fmt.Sprint(v) + } + }() + return params[0] == params[1], "" +} + +// ----------------------------------------------------------------------- +// DeepEquals checker. + +type deepEqualsChecker struct { + *CheckerInfo +} + +// The DeepEquals checker verifies that the obtained value is deep-equal to +// the expected value. The check will work correctly even when facing +// slices, interfaces, and values of different types (which always fail +// the test). +// +// For example: +// +// c.Assert(value, DeepEquals, 42) +// c.Assert(array, DeepEquals, []string{"hi", "there"}) +// +var DeepEquals Checker = &deepEqualsChecker{ + &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}}, +} + +func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) { + return reflect.DeepEqual(params[0], params[1]), "" +} + +// ----------------------------------------------------------------------- +// HasLen checker. + +type hasLenChecker struct { + *CheckerInfo +} + +// The HasLen checker verifies that the obtained value has the +// provided length. 
In many cases this is superior to using Equals +// in conjuction with the len function because in case the check +// fails the value itself will be printed, instead of its length, +// providing more details for figuring the problem. +// +// For example: +// +// c.Assert(list, HasLen, 5) +// +var HasLen Checker = &hasLenChecker{ + &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}}, +} + +func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) { + n, ok := params[1].(int) + if !ok { + return false, "n must be an int" + } + value := reflect.ValueOf(params[0]) + switch value.Kind() { + case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String: + default: + return false, "obtained value type has no length" + } + return value.Len() == n, "" +} + +// ----------------------------------------------------------------------- +// ErrorMatches checker. + +type errorMatchesChecker struct { + *CheckerInfo +} + +// The ErrorMatches checker verifies that the error value +// is non nil and matches the regular expression provided. +// +// For example: +// +// c.Assert(err, ErrorMatches, "perm.*denied") +// +var ErrorMatches Checker = errorMatchesChecker{ + &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}}, +} + +func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) { + if params[0] == nil { + return false, "Error value is nil" + } + err, ok := params[0].(error) + if !ok { + return false, "Value is not an error" + } + params[0] = err.Error() + names[0] = "error" + return matches(params[0], params[1]) +} + +// ----------------------------------------------------------------------- +// Matches checker. + +type matchesChecker struct { + *CheckerInfo +} + +// The Matches checker verifies that the string provided as the obtained +// value (or the string resulting from obtained.String()) matches the +// regular expression provided. 
+// +// For example: +// +// c.Assert(err, Matches, "perm.*denied") +// +var Matches Checker = &matchesChecker{ + &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}}, +} + +func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) { + return matches(params[0], params[1]) +} + +func matches(value, regex interface{}) (result bool, error string) { + reStr, ok := regex.(string) + if !ok { + return false, "Regex must be a string" + } + valueStr, valueIsStr := value.(string) + if !valueIsStr { + if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr { + valueStr, valueIsStr = valueWithStr.String(), true + } + } + if valueIsStr { + matches, err := regexp.MatchString("^"+reStr+"$", valueStr) + if err != nil { + return false, "Can't compile regex: " + err.Error() + } + return matches, "" + } + return false, "Obtained value is not a string and has no .String()" +} + +// ----------------------------------------------------------------------- +// Panics checker. + +type panicsChecker struct { + *CheckerInfo +} + +// The Panics checker verifies that calling the provided zero-argument +// function will cause a panic which is deep-equal to the provided value. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}). +// +// +var Panics Checker = &panicsChecker{ + &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}}, +} + +func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. 
+ if error != "" { + return + } + params[0] = recover() + names[0] = "panic" + result = reflect.DeepEqual(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +type panicMatchesChecker struct { + *CheckerInfo +} + +// The PanicMatches checker verifies that calling the provided zero-argument +// function will cause a panic with an error value matching +// the regular expression provided. +// +// For example: +// +// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`). +// +// +var PanicMatches Checker = &panicMatchesChecker{ + &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}}, +} + +func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) { + f := reflect.ValueOf(params[0]) + if f.Kind() != reflect.Func || f.Type().NumIn() != 0 { + return false, "Function must take zero arguments" + } + defer func() { + // If the function has not panicked, then don't do the check. + if errmsg != "" { + return + } + obtained := recover() + names[0] = "panic" + if e, ok := obtained.(error); ok { + params[0] = e.Error() + } else if _, ok := obtained.(string); ok { + params[0] = obtained + } else { + errmsg = "Panic value is not a string or an error" + return + } + result, errmsg = matches(params[0], params[1]) + }() + f.Call(nil) + return false, "Function has not panicked" +} + +// ----------------------------------------------------------------------- +// FitsTypeOf checker. + +type fitsTypeChecker struct { + *CheckerInfo +} + +// The FitsTypeOf checker verifies that the obtained value is +// assignable to a variable with the same type as the provided +// sample value. 
+// +// For example: +// +// c.Assert(value, FitsTypeOf, int64(0)) +// c.Assert(value, FitsTypeOf, os.Error(nil)) +// +var FitsTypeOf Checker = &fitsTypeChecker{ + &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}}, +} + +func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + sample := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !sample.IsValid() { + return false, "Invalid sample value" + } + return obtained.Type().AssignableTo(sample.Type()), "" +} + +// ----------------------------------------------------------------------- +// Implements checker. + +type implementsChecker struct { + *CheckerInfo +} + +// The Implements checker verifies that the obtained value +// implements the interface specified via a pointer to an interface +// variable. +// +// For example: +// +// var e os.Error +// c.Assert(err, Implements, &e) +// +var Implements Checker = &implementsChecker{ + &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}}, +} + +func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) { + obtained := reflect.ValueOf(params[0]) + ifaceptr := reflect.ValueOf(params[1]) + if !obtained.IsValid() { + return false, "" + } + if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface { + return false, "ifaceptr should be a pointer to an interface variable" + } + return obtained.Type().Implements(ifaceptr.Elem().Type()), "" +} diff --git a/vendor/src/github.com/go-check/check/checkers_test.go b/vendor/src/github.com/go-check/check/checkers_test.go new file mode 100644 index 0000000000000..5c697474696b3 --- /dev/null +++ b/vendor/src/github.com/go-check/check/checkers_test.go @@ -0,0 +1,272 @@ +package check_test + +import ( + "errors" + "gopkg.in/check.v1" + "reflect" + "runtime" +) + +type CheckersS 
struct{} + +var _ = check.Suite(&CheckersS{}) + +func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) { + info := checker.Info() + if info.Name != name { + c.Fatalf("Got name %s, expected %s", info.Name, name) + } + if !reflect.DeepEqual(info.Params, paramNames) { + c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames) + } +} + +func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) { + info := checker.Info() + if len(params) != len(info.Params) { + c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params)) + } + names := append([]string{}, info.Params...) + result_, error_ := checker.Check(params, names) + if result_ != result || error_ != error { + c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)", + info.Name, params, result_, error_, result, error) + } + return params, names +} + +func (s *CheckersS) TestComment(c *check.C) { + bug := check.Commentf("a %d bc", 42) + comment := bug.CheckCommentString() + if comment != "a 42 bc" { + c.Fatalf("Commentf returned %#v", comment) + } +} + +func (s *CheckersS) TestIsNil(c *check.C) { + testInfo(c, check.IsNil, "IsNil", []string{"value"}) + + testCheck(c, check.IsNil, true, "", nil) + testCheck(c, check.IsNil, false, "", "a") + + testCheck(c, check.IsNil, true, "", (chan int)(nil)) + testCheck(c, check.IsNil, false, "", make(chan int)) + testCheck(c, check.IsNil, true, "", (error)(nil)) + testCheck(c, check.IsNil, false, "", errors.New("")) + testCheck(c, check.IsNil, true, "", ([]int)(nil)) + testCheck(c, check.IsNil, false, "", make([]int, 1)) + testCheck(c, check.IsNil, false, "", int(0)) +} + +func (s *CheckersS) TestNotNil(c *check.C) { + testInfo(c, check.NotNil, "NotNil", []string{"value"}) + + testCheck(c, check.NotNil, false, "", nil) + testCheck(c, check.NotNil, true, "", "a") + + testCheck(c, check.NotNil, false, "", (chan int)(nil)) + 
testCheck(c, check.NotNil, true, "", make(chan int)) + testCheck(c, check.NotNil, false, "", (error)(nil)) + testCheck(c, check.NotNil, true, "", errors.New("")) + testCheck(c, check.NotNil, false, "", ([]int)(nil)) + testCheck(c, check.NotNil, true, "", make([]int, 1)) +} + +func (s *CheckersS) TestNot(c *check.C) { + testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"}) + + testCheck(c, check.Not(check.IsNil), false, "", nil) + testCheck(c, check.Not(check.IsNil), true, "", "a") +} + +type simpleStruct struct { + i int +} + +func (s *CheckersS) TestEquals(c *check.C) { + testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"}) + + // The simplest. + testCheck(c, check.Equals, true, "", 42, 42) + testCheck(c, check.Equals, false, "", 42, 43) + + // Different native types. + testCheck(c, check.Equals, false, "", int32(42), int64(42)) + + // With nil. + testCheck(c, check.Equals, false, "", 42, nil) + + // Slices + testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2}) + + // Struct values + testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1}) + testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2}) + + // Struct pointers + testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1}) + testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2}) +} + +func (s *CheckersS) TestDeepEquals(c *check.C) { + testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"}) + + // The simplest. + testCheck(c, check.DeepEquals, true, "", 42, 42) + testCheck(c, check.DeepEquals, false, "", 42, 43) + + // Different native types. + testCheck(c, check.DeepEquals, false, "", int32(42), int64(42)) + + // With nil. 
+ testCheck(c, check.DeepEquals, false, "", 42, nil) + + // Slices + testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2}) + testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3}) + + // Struct values + testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1}) + testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2}) + + // Struct pointers + testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1}) + testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2}) +} + +func (s *CheckersS) TestHasLen(c *check.C) { + testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"}) + + testCheck(c, check.HasLen, true, "", "abcd", 4) + testCheck(c, check.HasLen, true, "", []int{1, 2}, 2) + testCheck(c, check.HasLen, false, "", []int{1, 2}, 3) + + testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2") + testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2) +} + +func (s *CheckersS) TestErrorMatches(c *check.C) { + testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"}) + + testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error") + testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error") + testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error") + testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or") + + // Verify params mutation + params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error") + c.Assert(params[0], check.Equals, "some error") + c.Assert(names[0], check.Equals, "error") +} + +func (s *CheckersS) TestMatches(c *check.C) { + testInfo(c, check.Matches, "Matches", []string{"value", "regex"}) + + // Simple matching + testCheck(c, check.Matches, true, "", "abc", "abc") + testCheck(c, check.Matches, true, "", "abc", "a.c") + + // Must match fully + testCheck(c, 
check.Matches, false, "", "abc", "ab") + testCheck(c, check.Matches, false, "", "abc", "bc") + + // String()-enabled values accepted + testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c") + testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d") + + // Some error conditions. + testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c") + testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c") +} + +func (s *CheckersS) TestPanics(c *check.C) { + testInfo(c, check.Panics, "Panics", []string{"function", "expected"}) + + // Some errors. + testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM") + testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM") + + // Plain strings. + testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM") + testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM") + testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM") + + // Error values. 
+ testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM")) + testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) + + type deep struct{ i int } + // Deep value + testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99}) + + // Verify params/names mutation + params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM")) + c.Assert(params[0], check.ErrorMatches, "KABOOM") + c.Assert(names[0], check.Equals, "panic") + + // Verify a nil panic + testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil) + testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE") +} + +func (s *CheckersS) TestPanicMatches(c *check.C) { + testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"}) + + // Error matching. + testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M") + testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M") + + // Some errors. + testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM") + testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM") + + // Plain strings. 
+ testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M") + testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM") + testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M") + + // Verify params/names mutation + params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM") + c.Assert(params[0], check.Equals, "KABOOM") + c.Assert(names[0], check.Equals, "panic") + + // Verify a nil panic + testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "") +} + +func (s *CheckersS) TestFitsTypeOf(c *check.C) { + testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"}) + + // Basic types + testCheck(c, check.FitsTypeOf, true, "", 1, 0) + testCheck(c, check.FitsTypeOf, false, "", 1, int64(0)) + + // Aliases + testCheck(c, check.FitsTypeOf, false, "", 1, errors.New("")) + testCheck(c, check.FitsTypeOf, false, "", "error", errors.New("")) + testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New("")) + + // Structures + testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{}) + testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{}) + testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{}) + testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{}) + + // Some bad values + testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil)) + testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0) +} + +func (s *CheckersS) TestImplements(c *check.C) { + testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"}) + + var e error + var re runtime.Error + testCheck(c, check.Implements, true, "", errors.New(""), &e) + testCheck(c, check.Implements, false, "", errors.New(""), &re) + + // Some bad values + testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an 
interface variable", 0, errors.New("")) + testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil)) + testCheck(c, check.Implements, false, "", interface{}(nil), &e) +} diff --git a/vendor/src/github.com/go-check/check/export_test.go b/vendor/src/github.com/go-check/check/export_test.go new file mode 100644 index 0000000000000..0e6cfe0f22d80 --- /dev/null +++ b/vendor/src/github.com/go-check/check/export_test.go @@ -0,0 +1,9 @@ +package check + +func PrintLine(filename string, line int) (string, error) { + return printLine(filename, line) +} + +func Indent(s, with string) string { + return indent(s, with) +} diff --git a/vendor/src/github.com/go-check/check/fixture_test.go b/vendor/src/github.com/go-check/check/fixture_test.go new file mode 100644 index 0000000000000..2bff9e1633173 --- /dev/null +++ b/vendor/src/github.com/go-check/check/fixture_test.go @@ -0,0 +1,484 @@ +// Tests for the behavior of the test fixture system. + +package check_test + +import ( + . "gopkg.in/check.v1" +) + +// ----------------------------------------------------------------------- +// Fixture test suite. + +type FixtureS struct{} + +var fixtureS = Suite(&FixtureS{}) + +func (s *FixtureS) TestCountSuite(c *C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Basic fixture ordering verification. 
+ +func (s *FixtureS) TestOrder(c *C) { + helper := FixtureHelper{} + Run(&helper, nil) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +// ----------------------------------------------------------------------- +// Check the behavior when panics occur within tests and fixtures. + +func (s *FixtureS) TestPanicOnTest(c *C) { + helper := FixtureHelper{panicOn: "Test1"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" + + "\\.\\.\\. 
Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in (go)?panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.Test1\n" + + "(.|\n)*$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnSetUpTest(c *C) { + helper := FixtureHelper{panicOn: "SetUpTest"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 4) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.SetUpTest\n\n" + + "\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in (go)?panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.SetUpTest\n" + + "(.|\n)*" + + "\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: Fixture has panicked " + + "\\(see related PANIC\\)\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnTearDownTest(c *C) { + helper := FixtureHelper{panicOn: "TearDownTest"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.TearDownTest\n\n" + + "\\.\\.\\. 
Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in (go)?panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.TearDownTest\n" + + "(.|\n)*" + + "\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: Fixture has panicked " + + "\\(see related PANIC\\)\n$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnSetUpSuite(c *C) { + helper := FixtureHelper{panicOn: "SetUpSuite"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 2) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.SetUpSuite\n\n" + + "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in (go)?panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.SetUpSuite\n" + + "(.|\n)*$" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnTearDownSuite(c *C) { + helper := FixtureHelper{panicOn: "TearDownSuite"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) + + expected := "^\n-+\n" + + "PANIC: check_test\\.go:[0-9]+: " + + "FixtureHelper.TearDownSuite\n\n" + + "\\.\\.\\. 
Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" + + ".+:[0-9]+\n" + + " in (go)?panic\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.trace\n" + + ".*check_test.go:[0-9]+\n" + + " in FixtureHelper.TearDownSuite\n" + + "(.|\n)*$" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// A wrong argument on a test or fixture will produce a nice error. + +func (s *FixtureS) TestPanicOnWrongTestArg(c *C) { + helper := WrongTestArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "SetUpTest") + c.Check(helper.calls[4], Equals, "Test2") + c.Check(helper.calls[5], Equals, "TearDownTest") + c.Check(helper.calls[6], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 7) + + expected := "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongTestArgHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) { + helper := WrongSetUpTestArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpTestArgHelper\\.SetUpTest\n\n" + + "\\.\\.\\. 
Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) { + helper := WrongSetUpSuiteArgHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" + + "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Nice errors also when tests or fixture have wrong arg count. + +func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) { + helper := WrongTestArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "TearDownTest") + c.Check(helper.calls[3], Equals, "SetUpTest") + c.Check(helper.calls[4], Equals, "Test2") + c.Check(helper.calls[5], Equals, "TearDownTest") + c.Check(helper.calls[6], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 7) + + expected := "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongTestArgCountHelper\\.Test1\n\n" + + "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) { + helper := WrongSetUpTestArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" + + "\\.\\.\\. 
Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " + + "should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) { + helper := WrongSetUpSuiteArgCountHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(len(helper.calls), Equals, 0) + + expected := + "^\n-+\n" + + "PANIC: fixture_test\\.go:[0-9]+: " + + "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" + + "\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" + + "\\.SetUpSuite argument should be \\*check\\.C\n" + + c.Check(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Helper test suites with wrong function arguments. + +type WrongTestArgHelper struct { + FixtureHelper +} + +func (s *WrongTestArgHelper) Test1(t int) { +} + +type WrongSetUpTestArgHelper struct { + FixtureHelper +} + +func (s *WrongSetUpTestArgHelper) SetUpTest(t int) { +} + +type WrongSetUpSuiteArgHelper struct { + FixtureHelper +} + +func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) { +} + +type WrongTestArgCountHelper struct { + FixtureHelper +} + +func (s *WrongTestArgCountHelper) Test1(c *C, i int) { +} + +type WrongSetUpTestArgCountHelper struct { + FixtureHelper +} + +func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) { +} + +type WrongSetUpSuiteArgCountHelper struct { + FixtureHelper +} + +func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) { +} + +// ----------------------------------------------------------------------- +// Ensure fixture doesn't run without tests. 
+ +type NoTestsHelper struct { + hasRun bool +} + +func (s *NoTestsHelper) SetUpSuite(c *C) { + s.hasRun = true +} + +func (s *NoTestsHelper) TearDownSuite(c *C) { + s.hasRun = true +} + +func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) { + helper := NoTestsHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Check(helper.hasRun, Equals, false) +} + +// ----------------------------------------------------------------------- +// Verify that checks and assertions work correctly inside the fixture. + +type FixtureCheckHelper struct { + fail string + completed bool +} + +func (s *FixtureCheckHelper) SetUpSuite(c *C) { + switch s.fail { + case "SetUpSuiteAssert": + c.Assert(false, Equals, true) + case "SetUpSuiteCheck": + c.Check(false, Equals, true) + } + s.completed = true +} + +func (s *FixtureCheckHelper) SetUpTest(c *C) { + switch s.fail { + case "SetUpTestAssert": + c.Assert(false, Equals, true) + case "SetUpTestCheck": + c.Check(false, Equals, true) + } + s.completed = true +} + +func (s *FixtureCheckHelper) Test(c *C) { + // Do nothing. 
+} + +func (s *FixtureS) TestSetUpSuiteCheck(c *C) { + helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureCheckHelper\\.SetUpSuite\n\n"+ + "fixture_test\\.go:[0-9]+:\n"+ + " c\\.Check\\(false, Equals, true\\)\n"+ + "\\.+ obtained bool = false\n"+ + "\\.+ expected bool = true\n\n") + c.Assert(helper.completed, Equals, true) +} + +func (s *FixtureS) TestSetUpSuiteAssert(c *C) { + helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureCheckHelper\\.SetUpSuite\n\n"+ + "fixture_test\\.go:[0-9]+:\n"+ + " c\\.Assert\\(false, Equals, true\\)\n"+ + "\\.+ obtained bool = false\n"+ + "\\.+ expected bool = true\n\n") + c.Assert(helper.completed, Equals, false) +} + +// ----------------------------------------------------------------------- +// Verify that logging within SetUpTest() persists within the test log itself. + +type FixtureLogHelper struct { + c *C +} + +func (s *FixtureLogHelper) SetUpTest(c *C) { + s.c = c + c.Log("1") +} + +func (s *FixtureLogHelper) Test(c *C) { + c.Log("2") + s.c.Log("3") + c.Log("4") + c.Fail() +} + +func (s *FixtureLogHelper) TearDownTest(c *C) { + s.c.Log("5") +} + +func (s *FixtureS) TestFixtureLogging(c *C) { + helper := FixtureLogHelper{} + output := String{} + Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Matches, + "\n---+\n"+ + "FAIL: fixture_test\\.go:[0-9]+: "+ + "FixtureLogHelper\\.Test\n\n"+ + "1\n2\n3\n4\n5\n") +} + +// ----------------------------------------------------------------------- +// Skip() within fixture methods. 
+ +func (s *FixtureS) TestSkipSuite(c *C) { + helper := FixtureHelper{skip: true, skipOnN: 0} + output := String{} + result := Run(&helper, &RunConf{Output: &output}) + c.Assert(output.value, Equals, "") + c.Assert(helper.calls[0], Equals, "SetUpSuite") + c.Assert(helper.calls[1], Equals, "TearDownSuite") + c.Assert(len(helper.calls), Equals, 2) + c.Assert(result.Skipped, Equals, 2) +} + +func (s *FixtureS) TestSkipTest(c *C) { + helper := FixtureHelper{skip: true, skipOnN: 1} + output := String{} + result := Run(&helper, &RunConf{Output: &output}) + c.Assert(helper.calls[0], Equals, "SetUpSuite") + c.Assert(helper.calls[1], Equals, "SetUpTest") + c.Assert(helper.calls[2], Equals, "SetUpTest") + c.Assert(helper.calls[3], Equals, "Test2") + c.Assert(helper.calls[4], Equals, "TearDownTest") + c.Assert(helper.calls[5], Equals, "TearDownSuite") + c.Assert(len(helper.calls), Equals, 6) + c.Assert(result.Skipped, Equals, 1) +} diff --git a/vendor/src/github.com/go-check/check/foundation_test.go b/vendor/src/github.com/go-check/check/foundation_test.go new file mode 100644 index 0000000000000..8ecf7915f233a --- /dev/null +++ b/vendor/src/github.com/go-check/check/foundation_test.go @@ -0,0 +1,335 @@ +// These tests check that the foundations of gocheck are working properly. +// They already assume that fundamental failing is working already, though, +// since this was tested in bootstrap_test.go. Even then, some care may +// still have to be taken when using external functions, since they should +// of course not rely on functionality tested here. + +package check_test + +import ( + "fmt" + "gopkg.in/check.v1" + "log" + "os" + "regexp" + "strings" +) + +// ----------------------------------------------------------------------- +// Foundation test suite. 
+ +type FoundationS struct{} + +var foundationS = check.Suite(&FoundationS{}) + +func (s *FoundationS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *FoundationS) TestErrorf(c *check.C) { + // Do not use checkState() here. It depends on Errorf() working. + expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Errorf(\"Error %%v!\", \"message\")\n"+ + "... Error: Error message!\n\n", + getMyLine()+1) + c.Errorf("Error %v!", "message") + failed := c.Failed() + c.Succeed() + if log := c.GetTestLog(); log != expectedLog { + c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog) + c.Fail() + } + if !failed { + c.Logf("Errorf() didn't put the test in a failed state") + c.Fail() + } +} + +func (s *FoundationS) TestError(c *check.C) { + expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c\\.Error\\(\"Error \", \"message!\"\\)\n"+ + "\\.\\.\\. Error: Error message!\n\n", + getMyLine()+1) + c.Error("Error ", "message!") + checkState(c, nil, + &expectedState{ + name: "Error(`Error `, `message!`)", + failed: true, + log: expectedLog, + }) +} + +func (s *FoundationS) TestFailNow(c *check.C) { + defer (func() { + if !c.Failed() { + c.Error("FailNow() didn't fail the test") + } else { + c.Succeed() + if c.GetTestLog() != "" { + c.Error("Something got logged:\n" + c.GetTestLog()) + } + } + })() + + c.FailNow() + c.Log("FailNow() didn't stop the test") +} + +func (s *FoundationS) TestSucceedNow(c *check.C) { + defer (func() { + if c.Failed() { + c.Error("SucceedNow() didn't succeed the test") + } + if c.GetTestLog() != "" { + c.Error("Something got logged:\n" + c.GetTestLog()) + } + })() + + c.Fail() + c.SucceedNow() + c.Log("SucceedNow() didn't stop the test") +} + +func (s *FoundationS) TestFailureHeader(c *check.C) { + output := String{} + failHelper := FailHelper{} + check.Run(&failHelper, &check.RunConf{Output: &output}) + header := fmt.Sprintf(""+ + "\n-----------------------------------"+ + "-----------------------------------\n"+ + 
"FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n", + failHelper.testLine) + if strings.Index(output.value, header) == -1 { + c.Errorf(""+ + "Failure didn't print a proper header.\n"+ + "... Got:\n%s... Expected something with:\n%s", + output.value, header) + } +} + +func (s *FoundationS) TestFatal(c *check.C) { + var line int + defer (func() { + if !c.Failed() { + c.Error("Fatal() didn't fail the test") + } else { + c.Succeed() + expected := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Fatal(\"Die \", \"now!\")\n"+ + "... Error: Die now!\n\n", + line) + if c.GetTestLog() != expected { + c.Error("Incorrect log:", c.GetTestLog()) + } + } + })() + + line = getMyLine() + 1 + c.Fatal("Die ", "now!") + c.Log("Fatal() didn't stop the test") +} + +func (s *FoundationS) TestFatalf(c *check.C) { + var line int + defer (func() { + if !c.Failed() { + c.Error("Fatalf() didn't fail the test") + } else { + c.Succeed() + expected := fmt.Sprintf("foundation_test.go:%d:\n"+ + " c.Fatalf(\"Die %%s!\", \"now\")\n"+ + "... Error: Die now!\n\n", + line) + if c.GetTestLog() != expected { + c.Error("Incorrect log:", c.GetTestLog()) + } + } + })() + + line = getMyLine() + 1 + c.Fatalf("Die %s!", "now") + c.Log("Fatalf() didn't stop the test") +} + +func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) { + log := fmt.Sprintf(""+ + "foundation_test.go:%d:\n"+ + " result := c.Check\\(10, check.Equals, 20\\)\n"+ + "\\.\\.\\. obtained int = 10\n"+ + "\\.\\.\\. 
expected int = 20\n\n", + getMyLine()+1) + result := c.Check(10, check.Equals, 20) + checkState(c, result, + &expectedState{ + name: "Check(10, Equals, 20)", + result: false, + failed: true, + log: log, + }) +} + +func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) { + result, line := checkEqualWrapper(c, 10, 20) + testLine := getMyLine() - 1 + log := fmt.Sprintf(""+ + "foundation_test.go:%d:\n"+ + " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+ + "check_test.go:%d:\n"+ + " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+ + "\\.\\.\\. obtained int = 10\n"+ + "\\.\\.\\. expected int = 20\n\n", + testLine, line) + checkState(c, result, + &expectedState{ + name: "Check(10, Equals, 20)", + result: false, + failed: true, + log: log, + }) +} + +// ----------------------------------------------------------------------- +// ExpectFailure() inverts the logic of failure. + +type ExpectFailureSucceedHelper struct{} + +func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) { + c.ExpectFailure("It booms!") + c.Error("Boom!") +} + +type ExpectFailureFailHelper struct{} + +func (s *ExpectFailureFailHelper) TestFail(c *check.C) { + c.ExpectFailure("Bug #XYZ") +} + +func (s *FoundationS) TestExpectFailureFail(c *check.C) { + helper := ExpectFailureFailHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output}) + + expected := "" + + "^\n-+\n" + + "FAIL: foundation_test\\.go:[0-9]+:" + + " ExpectFailureFailHelper\\.TestFail\n\n" + + "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" + + "\\.\\.\\. 
Reason: Bug #XYZ\n$" + + matched, err := regexp.MatchString(expected, output.value) + if err != nil { + c.Error("Bad expression: ", expected) + } else if !matched { + c.Error("ExpectFailure() didn't log properly:\n", output.value) + } + + c.Assert(result.ExpectedFailures, check.Equals, 0) +} + +func (s *FoundationS) TestExpectFailureSucceed(c *check.C) { + helper := ExpectFailureSucceedHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output}) + + c.Assert(output.value, check.Equals, "") + c.Assert(result.ExpectedFailures, check.Equals, 1) +} + +func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) { + helper := ExpectFailureSucceedHelper{} + output := String{} + result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true}) + + expected := "" + + "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" + + " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n" + + matched, err := regexp.MatchString(expected, output.value) + if err != nil { + c.Error("Bad expression: ", expected) + } else if !matched { + c.Error("ExpectFailure() didn't log properly:\n", output.value) + } + + c.Assert(result.ExpectedFailures, check.Equals, 1) +} + +// ----------------------------------------------------------------------- +// Skip() allows stopping a test without positive/negative results. 

// SkipTestHelper has a single test that calls Skip() and then Error();
// the Error() must never execute, because Skip() stops the test.
type SkipTestHelper struct{}

func (s *SkipTestHelper) TestFail(c *check.C) {
	c.Skip("Wrong platform or whatever")
	c.Error("Boom!")
}

// TestSkip verifies that a skipped test produces no output in non-verbose mode.
func (s *FoundationS) TestSkip(c *check.C) {
	helper := SkipTestHelper{}
	output := String{}
	check.Run(&helper, &check.RunConf{Output: &output})

	if output.value != "" {
		c.Error("Skip() logged something:\n", output.value)
	}
}

// TestSkipVerbose verifies that in verbose mode a skipped test logs a
// "SKIP:" line carrying the skip reason.
func (s *FoundationS) TestSkipVerbose(c *check.C) {
	helper := SkipTestHelper{}
	output := String{}
	check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})

	expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" +
		" \\(Wrong platform or whatever\\)"
	matched, err := regexp.MatchString(expected, output.value)
	if err != nil {
		c.Error("Bad expression: ", expected)
	} else if !matched {
		c.Error("Skip() didn't log properly:\n", output.value)
	}
}

// -----------------------------------------------------------------------
// Check minimum *log.Logger interface provided by *check.C.

type minLogger interface {
	Output(calldepth int, s string) error
}

func (s *BootstrapS) TestMinLogger(c *check.C) {
	var logger minLogger
	// Both a real *log.Logger and *check.C must satisfy minLogger.
	logger = log.New(os.Stderr, "", 0)
	logger = c
	logger.Output(0, "Hello there")
	expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n`
	output := c.GetTestLog()
	c.Assert(output, check.Matches, expected)
}

// -----------------------------------------------------------------------
// Ensure that suites with embedded types are working fine, including
// the workaround for issue 906.
+ +type EmbeddedInternalS struct { + called bool +} + +type EmbeddedS struct { + EmbeddedInternalS +} + +var embeddedS = check.Suite(&EmbeddedS{}) + +func (s *EmbeddedS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +func (s *EmbeddedInternalS) TestMethod(c *check.C) { + c.Error("TestMethod() of the embedded type was called!?") +} + +func (s *EmbeddedS) TestMethod(c *check.C) { + // http://code.google.com/p/go/issues/detail?id=906 + c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner? + s.called = true +} diff --git a/vendor/src/github.com/go-check/check/helpers.go b/vendor/src/github.com/go-check/check/helpers.go new file mode 100644 index 0000000000000..4b6c26da45245 --- /dev/null +++ b/vendor/src/github.com/go-check/check/helpers.go @@ -0,0 +1,231 @@ +package check + +import ( + "fmt" + "strings" + "time" +) + +// TestName returns the current test name in the form "SuiteName.TestName" +func (c *C) TestName() string { + return c.testName +} + +// ----------------------------------------------------------------------- +// Basic succeeding/failing logic. + +// Failed returns whether the currently running test has already failed. +func (c *C) Failed() bool { + return c.status == failedSt +} + +// Fail marks the currently running test as failed. +// +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) Fail() { + c.status = failedSt +} + +// FailNow marks the currently running test as failed and stops running it. +// Something ought to have been previously logged so the developer can tell +// what went wrong. The higher level helper functions will fail the test +// and do the logging properly. +func (c *C) FailNow() { + c.Fail() + c.stopNow() +} + +// Succeed marks the currently running test as succeeded, undoing any +// previous failures. 
func (c *C) Succeed() {
	c.status = succeededSt
}

// SucceedNow marks the currently running test as succeeded, undoing any
// previous failures, and stops running the test.
func (c *C) SucceedNow() {
	c.Succeed()
	c.stopNow()
}

// ExpectFailure informs that the running test is knowingly broken for
// the provided reason. If the test does not fail, an error will be reported
// to raise attention to this fact. This method is useful to temporarily
// disable tests which cover well known problems until a better time to
// fix the problem is found, without forgetting about the fact that a
// failure still exists.
func (c *C) ExpectFailure(reason string) {
	if reason == "" {
		panic("Missing reason why the test is expected to fail")
	}
	c.mustFail = true
	c.reason = reason
}

// Skip skips the running test for the provided reason. If run from within
// SetUpTest, the individual test being set up will be skipped, and if run
// from within SetUpSuite, the whole suite is skipped.
func (c *C) Skip(reason string) {
	if reason == "" {
		panic("Missing reason why the test is being skipped")
	}
	c.reason = reason
	c.status = skippedSt
	c.stopNow()
}

// -----------------------------------------------------------------------
// Basic logging.

// GetTestLog returns the current test error output.
func (c *C) GetTestLog() string {
	return c.logb.String()
}

// Log logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Log(args ...interface{}) {
	c.log(args...)
}

// Logf logs some information into the test error output.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Logf(format string, args ...interface{}) {
	c.logf(format, args...)
}

// Output enables *C to be used as a logger in functions that require only
// the minimum interface of *log.Logger.
func (c *C) Output(calldepth int, s string) error {
	// Break the elapsed time since the test started into minutes, seconds
	// and milliseconds for the "[LOG] m:ss.mmm" prefix.
	d := time.Now().Sub(c.startTime)
	msec := d / time.Millisecond
	sec := d / time.Second
	min := d / time.Minute

	c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
	return nil
}

// Error logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprint.
func (c *C) Error(args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
	c.logNewLine()
	c.Fail()
}

// Errorf logs an error into the test error output and marks the test as failed.
// The provided arguments are assembled together into a string with fmt.Sprintf.
func (c *C) Errorf(format string, args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprintf("Error: "+format, args...))
	c.logNewLine()
	c.Fail()
}

// Fatal logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprint.
func (c *C) Fatal(args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
	c.logNewLine()
	c.FailNow()
}

// Fatalf logs an error into the test error output, marks the test as failed, and
// stops the test execution. The provided arguments are assembled together into
// a string with fmt.Sprintf.
func (c *C) Fatalf(format string, args ...interface{}) {
	c.logCaller(1)
	c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
	c.logNewLine()
	c.FailNow()
}

// -----------------------------------------------------------------------
// Generic checks and assertions based on checkers.

// Check verifies if the first value matches the expected value according
// to the provided checker. If they do not match, an error is logged, the
// test is marked as failed, and the test execution continues.
+// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool { + return c.internalCheck("Check", obtained, checker, args...) +} + +// Assert ensures that the first value matches the expected value according +// to the provided checker. If they do not match, an error is logged, the +// test is marked as failed, and the test execution stops. +// +// Some checkers may not need the expected argument (e.g. IsNil). +// +// Extra arguments provided to the function are logged next to the reported +// problem when the matching fails. +func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) { + if !c.internalCheck("Assert", obtained, checker, args...) { + c.stopNow() + } +} + +func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool { + if checker == nil { + c.logCaller(2) + c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName)) + c.logString("Oops.. you've provided a nil checker!") + c.logNewLine() + c.Fail() + return false + } + + // If the last argument is a bug info, extract it out. + var comment CommentInterface + if len(args) > 0 { + if c, ok := args[len(args)-1].(CommentInterface); ok { + comment = c + args = args[:len(args)-1] + } + } + + params := append([]interface{}{obtained}, args...) + info := checker.Info() + + if len(params) != len(info.Params) { + names := append([]string{info.Params[0], info.Name}, info.Params[1:]...) + c.logCaller(2) + c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", "))) + c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1)) + c.logNewLine() + c.Fail() + return false + } + + // Copy since it may be mutated by Check. + names := append([]string{}, info.Params...) 
+ + // Do the actual check. + result, error := checker.Check(params, names) + if !result || error != "" { + c.logCaller(2) + for i := 0; i != len(params); i++ { + c.logValue(names[i], params[i]) + } + if comment != nil { + c.logString(comment.CheckCommentString()) + } + if error != "" { + c.logString(error) + } + c.logNewLine() + c.Fail() + return false + } + return true +} diff --git a/vendor/src/github.com/go-check/check/helpers_test.go b/vendor/src/github.com/go-check/check/helpers_test.go new file mode 100644 index 0000000000000..4baa656ba8320 --- /dev/null +++ b/vendor/src/github.com/go-check/check/helpers_test.go @@ -0,0 +1,519 @@ +// These tests verify the inner workings of the helper methods associated +// with check.T. + +package check_test + +import ( + "gopkg.in/check.v1" + "os" + "reflect" + "runtime" + "sync" +) + +var helpersS = check.Suite(&HelpersS{}) + +type HelpersS struct{} + +func (s *HelpersS) TestCountSuite(c *check.C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Fake checker and bug info to verify the behavior of Assert() and Check(). + +type MyChecker struct { + info *check.CheckerInfo + params []interface{} + names []string + result bool + error string +} + +func (checker *MyChecker) Info() *check.CheckerInfo { + if checker.info == nil { + return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}} + } + return checker.info +} + +func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) { + rparams := checker.params + rnames := checker.names + checker.params = append([]interface{}{}, params...) + checker.names = append([]string{}, names...) 
+ if rparams != nil { + copy(params, rparams) + } + if rnames != nil { + copy(names, rnames) + } + return checker.result, checker.error +} + +type myCommentType string + +func (c myCommentType) CheckCommentString() string { + return string(c) +} + +func myComment(s string) myCommentType { + return myCommentType(s) +} + +// ----------------------------------------------------------------------- +// Ensure a real checker actually works fine. + +func (s *HelpersS) TestCheckerInterface(c *check.C) { + testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} { + return c.Check(1, check.Equals, 1) + }) +} + +// ----------------------------------------------------------------------- +// Tests for Check(), mostly the same as for Assert() following these. + +func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) { + checker := &MyChecker{result: true} + testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} { + return c.Check(1, checker, 2) + }) + if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) { + checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + testHelperSuccess(c, "Check(1, checker)", true, func() interface{} { + return c.Check(1, checker) + }) + if !reflect.DeepEqual(checker.params, []interface{}{1}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestCheckFailWithExpected(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Check(1, checker, 2)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) { + checker := 
&MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2, myComment("Hello world!")) + }) +} + +func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " // Nice leading comment\\.\n" + + " return c\\.Check\\(1, checker, 2\\) // Hello there\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log, + func() interface{} { + // Nice leading comment. + return c.Check(1, checker, 2) // Hello there + }) +} + +func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker\\)\n" + + "\\.+ myvalue int = 1\n\n" + testHelperFailure(c, "Check(1, checker)", false, false, log, + func() interface{} { + return c.Check(1, checker) + }) +} + +func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myvalue int = 1\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Check(1, checker, msg)", false, false, log, + func() interface{} { + return c.Check(1, checker, myComment("Hello world!")) + }) +} + +func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) { + checker := 
&MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker\\)\n" + + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 2\n\n" + testHelperFailure(c, "Check(1, checker, !?)", false, false, log, + func() interface{} { + return c.Check(1, checker) + }) +} + +func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) { + checker := &MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2, 3\\)\n" + + "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 4\n\n" + testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2, 3) + }) +} + +func (s *HelpersS) TestCheckWithError(c *check.C) { + checker := &MyChecker{result: false, error: "Some not so cool data provided!"} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Some not so cool data provided!\n\n" + testHelperFailure(c, "Check(1, checker, 2)", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +func (s *HelpersS) TestCheckWithNilChecker(c *check.C) { + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, nil\\)\n" + + "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + + "\\.+ Oops\\.\\. 
you've provided a nil checker!\n\n" + testHelperFailure(c, "Check(obtained, nil)", false, false, log, + func() interface{} { + return c.Check(1, nil) + }) +} + +func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) { + checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " return c\\.Check\\(1, checker, 2\\)\n" + + "\\.+ newobtained int = 3\n" + + "\\.+ newexpected int = 4\n\n" + testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log, + func() interface{} { + return c.Check(1, checker, 2) + }) +} + +// ----------------------------------------------------------------------- +// Tests for Assert(), mostly the same as for Check() above. + +func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) { + checker := &MyChecker{result: true} + testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} { + c.Assert(1, checker, 2) + return nil + }) + if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) { + checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} { + c.Assert(1, checker) + return nil + }) + if !reflect.DeepEqual(checker.params, []interface{}{1}) { + c.Fatalf("Bad params for check: %#v", checker.params) + } +} + +func (s *HelpersS) TestAssertFailWithExpected(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n\n" + testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, + func() interface{} { + c.Assert(1, checker, 2) + return nil + }) +} + +func (s *HelpersS) 
TestAssertFailWithExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log, + func() interface{} { + c.Assert(1, checker, 2, myComment("Hello world!")) + return nil + }) +} + +func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker\\)\n" + + "\\.+ myvalue int = 1\n\n" + testHelperFailure(c, "Assert(1, checker)", nil, true, log, + func() interface{} { + c.Assert(1, checker) + return nil + }) +} + +func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) { + checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" + + "\\.+ myvalue int = 1\n" + + "\\.+ Hello world!\n\n" + testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log, + func() interface{} { + c.Assert(1, checker, myComment("Hello world!")) + return nil + }) +} + +func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) { + checker := &MyChecker{result: true} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker\\)\n" + + "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" + + "\\.+ Wrong number of parameters for MyChecker: " + + "want 3, got 2\n\n" + testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log, + func() interface{} { + c.Assert(1, checker) + return nil + }) +} + +func (s *HelpersS) TestAssertWithError(c *check.C) { + checker := 
&MyChecker{result: false, error: "Some not so cool data provided!"} + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, checker, 2\\)\n" + + "\\.+ myobtained int = 1\n" + + "\\.+ myexpected int = 2\n" + + "\\.+ Some not so cool data provided!\n\n" + testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log, + func() interface{} { + c.Assert(1, checker, 2) + return nil + }) +} + +func (s *HelpersS) TestAssertWithNilChecker(c *check.C) { + log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" + + " c\\.Assert\\(1, nil\\)\n" + + "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" + + "\\.+ Oops\\.\\. you've provided a nil checker!\n\n" + testHelperFailure(c, "Assert(obtained, nil)", nil, true, log, + func() interface{} { + c.Assert(1, nil) + return nil + }) +} + +// ----------------------------------------------------------------------- +// Ensure that values logged work properly in some interesting cases. + +func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" + + "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" + + "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n" + testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log, + func() interface{} { + return c.Check([]byte{1, 2}, checker, []byte{1, 3}) + }) +} + +func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) { + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" + + "\\.+ myobtained string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\\\\n\"\n" + + "\\.+ myexpected string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\\\\n\" \\+\n" + + "\\.+ \"c\"\n\n" + testHelperFailure(c, 
`Check("a\nb\n", chk, "a\nb\nc")`, false, false, log, + func() interface{} { + return c.Check("a\nb\n", checker, "a\nb\nc") + }) +} + +func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) { + // If the newline is at the end of the string, don't log as multi-line. + checker := &MyChecker{result: false} + log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" + + " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" + + "\\.+ myobtained string = \"a b\\\\n\"\n" + + "\\.+ myexpected string = \"\" \\+\n" + + "\\.+ \"a\\\\n\" \\+\n" + + "\\.+ \"b\"\n\n" + testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log, + func() interface{} { + return c.Check("a b\n", checker, "a\nb") + }) +} + +// ----------------------------------------------------------------------- +// MakeDir() tests. + +type MkDirHelper struct { + path1 string + path2 string + isDir1 bool + isDir2 bool + isDir3 bool + isDir4 bool +} + +func (s *MkDirHelper) SetUpSuite(c *check.C) { + s.path1 = c.MkDir() + s.isDir1 = isDir(s.path1) +} + +func (s *MkDirHelper) Test(c *check.C) { + s.path2 = c.MkDir() + s.isDir2 = isDir(s.path2) +} + +func (s *MkDirHelper) TearDownSuite(c *check.C) { + s.isDir3 = isDir(s.path1) + s.isDir4 = isDir(s.path2) +} + +func (s *HelpersS) TestMkDir(c *check.C) { + helper := MkDirHelper{} + output := String{} + check.Run(&helper, &check.RunConf{Output: &output}) + c.Assert(output.value, check.Equals, "") + c.Check(helper.isDir1, check.Equals, true) + c.Check(helper.isDir2, check.Equals, true) + c.Check(helper.isDir3, check.Equals, true) + c.Check(helper.isDir4, check.Equals, true) + c.Check(helper.path1, check.Not(check.Equals), + helper.path2) + c.Check(isDir(helper.path1), check.Equals, false) + c.Check(isDir(helper.path2), check.Equals, false) +} + +func isDir(path string) bool { + if stat, err := os.Stat(path); err == nil { + return stat.IsDir() + } + return false +} + +// Concurrent logging should not corrupt the underling buffer. 
+// Use go test -race to detect the race in this test. +func (s *HelpersS) TestConcurrentLogging(c *check.C) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) + var start, stop sync.WaitGroup + start.Add(1) + for i, n := 0, runtime.NumCPU()*2; i < n; i++ { + stop.Add(1) + go func(i int) { + start.Wait() + for j := 0; j < 30; j++ { + c.Logf("Worker %d: line %d", i, j) + } + stop.Done() + }(i) + } + start.Done() + stop.Wait() +} + +// ----------------------------------------------------------------------- +// Test the TestName function + +type TestNameHelper struct { + name1 string + name2 string + name3 string + name4 string + name5 string +} + +func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() } +func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() } +func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() } +func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() } +func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() } + +func (s *HelpersS) TestTestName(c *check.C) { + helper := TestNameHelper{} + output := String{} + check.Run(&helper, &check.RunConf{Output: &output}) + c.Check(helper.name1, check.Equals, "") + c.Check(helper.name2, check.Equals, "TestNameHelper.Test") + c.Check(helper.name3, check.Equals, "TestNameHelper.Test") + c.Check(helper.name4, check.Equals, "TestNameHelper.Test") + c.Check(helper.name5, check.Equals, "") +} + +// ----------------------------------------------------------------------- +// A couple of helper functions to test helper functions. 
:-) + +func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) { + var result interface{} + defer (func() { + if err := recover(); err != nil { + panic(err) + } + checkState(c, result, + &expectedState{ + name: name, + result: expectedResult, + failed: false, + log: "", + }) + })() + result = closure() +} + +func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) { + var result interface{} + defer (func() { + if err := recover(); err != nil { + panic(err) + } + checkState(c, result, + &expectedState{ + name: name, + result: expectedResult, + failed: true, + log: log, + }) + })() + result = closure() + if shouldStop { + c.Logf("%s didn't stop when it should", name) + } +} diff --git a/vendor/src/github.com/go-check/check/printer.go b/vendor/src/github.com/go-check/check/printer.go new file mode 100644 index 0000000000000..e0f7557b5cc76 --- /dev/null +++ b/vendor/src/github.com/go-check/check/printer.go @@ -0,0 +1,168 @@ +package check + +import ( + "bytes" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "os" +) + +func indent(s, with string) (r string) { + eol := true + for i := 0; i != len(s); i++ { + c := s[i] + switch { + case eol && c == '\n' || c == '\r': + case c == '\n' || c == '\r': + eol = true + case eol: + eol = false + s = s[:i] + with + s[i:] + i += len(with) + } + } + return s +} + +func printLine(filename string, line int) (string, error) { + fset := token.NewFileSet() + file, err := os.Open(filename) + if err != nil { + return "", err + } + fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments) + if err != nil { + return "", err + } + config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4} + lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config} + ast.Walk(lp, fnode) + result := lp.output.Bytes() + // Comments leave \n at the end. 
+ n := len(result) + for n > 0 && result[n-1] == '\n' { + n-- + } + return string(result[:n]), nil +} + +type linePrinter struct { + config *printer.Config + fset *token.FileSet + fnode *ast.File + line int + output bytes.Buffer + stmt ast.Stmt +} + +func (lp *linePrinter) emit() bool { + if lp.stmt != nil { + lp.trim(lp.stmt) + lp.printWithComments(lp.stmt) + lp.stmt = nil + return true + } + return false +} + +func (lp *linePrinter) printWithComments(n ast.Node) { + nfirst := lp.fset.Position(n.Pos()).Line + nlast := lp.fset.Position(n.End()).Line + for _, g := range lp.fnode.Comments { + cfirst := lp.fset.Position(g.Pos()).Line + clast := lp.fset.Position(g.End()).Line + if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column { + for _, c := range g.List { + lp.output.WriteString(c.Text) + lp.output.WriteByte('\n') + } + } + if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash { + // The printer will not include the comment if it starts past + // the node itself. Trick it into printing by overlapping the + // slash with the end of the statement. + g.List[0].Slash = n.End() - 1 + } + } + node := &printer.CommentedNode{n, lp.fnode.Comments} + lp.config.Fprint(&lp.output, lp.fset, node) +} + +func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) { + if n == nil { + if lp.output.Len() == 0 { + lp.emit() + } + return nil + } + first := lp.fset.Position(n.Pos()).Line + last := lp.fset.Position(n.End()).Line + if first <= lp.line && last >= lp.line { + // Print the innermost statement containing the line. 
+ if stmt, ok := n.(ast.Stmt); ok { + if _, ok := n.(*ast.BlockStmt); !ok { + lp.stmt = stmt + } + } + if first == lp.line && lp.emit() { + return nil + } + return lp + } + return nil +} + +func (lp *linePrinter) trim(n ast.Node) bool { + stmt, ok := n.(ast.Stmt) + if !ok { + return true + } + line := lp.fset.Position(n.Pos()).Line + if line != lp.line { + return false + } + switch stmt := stmt.(type) { + case *ast.IfStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.SwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.TypeSwitchStmt: + stmt.Body = lp.trimBlock(stmt.Body) + case *ast.CaseClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.CommClause: + stmt.Body = lp.trimList(stmt.Body) + case *ast.BlockStmt: + stmt.List = lp.trimList(stmt.List) + } + return true +} + +func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt { + if !lp.trim(stmt) { + return lp.emptyBlock(stmt) + } + stmt.Rbrace = stmt.Lbrace + return stmt +} + +func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt { + for i := 0; i != len(stmts); i++ { + if !lp.trim(stmts[i]) { + stmts[i] = lp.emptyStmt(stmts[i]) + break + } + } + return stmts +} + +func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt { + return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}} +} + +func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt { + p := n.Pos() + return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p} +} diff --git a/vendor/src/github.com/go-check/check/printer_test.go b/vendor/src/github.com/go-check/check/printer_test.go new file mode 100644 index 0000000000000..538b2d52e3b7e --- /dev/null +++ b/vendor/src/github.com/go-check/check/printer_test.go @@ -0,0 +1,104 @@ +package check_test + +import ( + . 
"gopkg.in/check.v1" +) + +var _ = Suite(&PrinterS{}) + +type PrinterS struct{} + +func (s *PrinterS) TestCountSuite(c *C) { + suitesRun += 1 +} + +var printTestFuncLine int + +func init() { + printTestFuncLine = getMyLine() + 3 +} + +func printTestFunc() { + println(1) // Comment1 + if 2 == 2 { // Comment2 + println(3) // Comment3 + } + switch 5 { + case 6: println(6) // Comment6 + println(7) + } + switch interface{}(9).(type) {// Comment9 + case int: println(10) + println(11) + } + select { + case <-(chan bool)(nil): println(14) + println(15) + default: println(16) + println(17) + } + println(19, + 20) + _ = func() { println(21) + println(22) + } + println(24, func() { + println(25) + }) + // Leading comment + // with multiple lines. + println(29) // Comment29 +} + +var printLineTests = []struct { + line int + output string +}{ + {1, "println(1) // Comment1"}, + {2, "if 2 == 2 { // Comment2\n ...\n}"}, + {3, "println(3) // Comment3"}, + {5, "switch 5 {\n...\n}"}, + {6, "case 6:\n println(6) // Comment6\n ..."}, + {7, "println(7)"}, + {9, "switch interface{}(9).(type) { // Comment9\n...\n}"}, + {10, "case int:\n println(10)\n ..."}, + {14, "case <-(chan bool)(nil):\n println(14)\n ..."}, + {15, "println(15)"}, + {16, "default:\n println(16)\n ..."}, + {17, "println(17)"}, + {19, "println(19,\n 20)"}, + {20, "println(19,\n 20)"}, + {21, "_ = func() {\n println(21)\n println(22)\n}"}, + {22, "println(22)"}, + {24, "println(24, func() {\n println(25)\n})"}, + {25, "println(25)"}, + {26, "println(24, func() {\n println(25)\n})"}, + {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"}, +} + +func (s *PrinterS) TestPrintLine(c *C) { + for _, test := range printLineTests { + output, err := PrintLine("printer_test.go", printTestFuncLine+test.line) + c.Assert(err, IsNil) + c.Assert(output, Equals, test.output) + } +} + +var indentTests = []struct { + in, out string +}{ + {"", ""}, + {"\n", "\n"}, + {"a", ">>>a"}, + {"a\n", ">>>a\n"}, + {"a\nb", 
">>>a\n>>>b"}, + {" ", ">>> "}, +} + +func (s *PrinterS) TestIndent(c *C) { + for _, test := range indentTests { + out := Indent(test.in, ">>>") + c.Assert(out, Equals, test.out) + } + +} diff --git a/vendor/src/github.com/go-check/check/run.go b/vendor/src/github.com/go-check/check/run.go new file mode 100644 index 0000000000000..da8fd79872997 --- /dev/null +++ b/vendor/src/github.com/go-check/check/run.go @@ -0,0 +1,175 @@ +package check + +import ( + "bufio" + "flag" + "fmt" + "os" + "testing" + "time" +) + +// ----------------------------------------------------------------------- +// Test suite registry. + +var allSuites []interface{} + +// Suite registers the given value as a test suite to be run. Any methods +// starting with the Test prefix in the given value will be considered as +// a test method. +func Suite(suite interface{}) interface{} { + allSuites = append(allSuites, suite) + return suite +} + +// ----------------------------------------------------------------------- +// Public running interface. 
+ +var ( + oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run") + oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode") + oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)") + oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks") + oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark") + oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run") + oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory") + + newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run") + newVerboseFlag = flag.Bool("check.v", false, "Verbose mode") + newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)") + newBenchFlag = flag.Bool("check.b", false, "Run benchmarks") + newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark") + newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks") + newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run") + newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory") +) + +// TestingT runs all test suites registered with the Suite function, +// printing results to stdout, and reporting any failures back to +// the "testing" package. 
+func TestingT(testingT *testing.T) { + benchTime := *newBenchTime + if benchTime == 1*time.Second { + benchTime = *oldBenchTime + } + conf := &RunConf{ + Filter: *oldFilterFlag + *newFilterFlag, + Verbose: *oldVerboseFlag || *newVerboseFlag, + Stream: *oldStreamFlag || *newStreamFlag, + Benchmark: *oldBenchFlag || *newBenchFlag, + BenchmarkTime: benchTime, + BenchmarkMem: *newBenchMem, + KeepWorkDir: *oldWorkFlag || *newWorkFlag, + } + if *oldListFlag || *newListFlag { + w := bufio.NewWriter(os.Stdout) + for _, name := range ListAll(conf) { + fmt.Fprintln(w, name) + } + w.Flush() + return + } + result := RunAll(conf) + println(result.String()) + if !result.Passed() { + testingT.Fail() + } +} + +// RunAll runs all test suites registered with the Suite function, using the +// provided run configuration. +func RunAll(runConf *RunConf) *Result { + result := Result{} + for _, suite := range allSuites { + result.Add(Run(suite, runConf)) + } + return &result +} + +// Run runs the provided test suite using the provided run configuration. +func Run(suite interface{}, runConf *RunConf) *Result { + runner := newSuiteRunner(suite, runConf) + return runner.run() +} + +// ListAll returns the names of all the test functions registered with the +// Suite function that will be run with the provided run configuration. +func ListAll(runConf *RunConf) []string { + var names []string + for _, suite := range allSuites { + names = append(names, List(suite, runConf)...) + } + return names +} + +// List returns the names of the test functions in the given +// suite that will be run with the provided run configuration. +func List(suite interface{}, runConf *RunConf) []string { + var names []string + runner := newSuiteRunner(suite, runConf) + for _, t := range runner.tests { + names = append(names, t.String()) + } + return names +} + +// ----------------------------------------------------------------------- +// Result methods. 
+ +func (r *Result) Add(other *Result) { + r.Succeeded += other.Succeeded + r.Skipped += other.Skipped + r.Failed += other.Failed + r.Panicked += other.Panicked + r.FixturePanicked += other.FixturePanicked + r.ExpectedFailures += other.ExpectedFailures + r.Missed += other.Missed + if r.WorkDir != "" && other.WorkDir != "" { + r.WorkDir += ":" + other.WorkDir + } else if other.WorkDir != "" { + r.WorkDir = other.WorkDir + } +} + +func (r *Result) Passed() bool { + return (r.Failed == 0 && r.Panicked == 0 && + r.FixturePanicked == 0 && r.Missed == 0 && + r.RunError == nil) +} + +func (r *Result) String() string { + if r.RunError != nil { + return "ERROR: " + r.RunError.Error() + } + + var value string + if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 && + r.Missed == 0 { + value = "OK: " + } else { + value = "OOPS: " + } + value += fmt.Sprintf("%d passed", r.Succeeded) + if r.Skipped != 0 { + value += fmt.Sprintf(", %d skipped", r.Skipped) + } + if r.ExpectedFailures != 0 { + value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures) + } + if r.Failed != 0 { + value += fmt.Sprintf(", %d FAILED", r.Failed) + } + if r.Panicked != 0 { + value += fmt.Sprintf(", %d PANICKED", r.Panicked) + } + if r.FixturePanicked != 0 { + value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked) + } + if r.Missed != 0 { + value += fmt.Sprintf(", %d MISSED", r.Missed) + } + if r.WorkDir != "" { + value += "\nWORK=" + r.WorkDir + } + return value +} diff --git a/vendor/src/github.com/go-check/check/run_test.go b/vendor/src/github.com/go-check/check/run_test.go new file mode 100644 index 0000000000000..f41fffc3f5b56 --- /dev/null +++ b/vendor/src/github.com/go-check/check/run_test.go @@ -0,0 +1,419 @@ +// These tests verify the test running logic. + +package check_test + +import ( + "errors" + . 
"gopkg.in/check.v1" + "os" + "sync" +) + +var runnerS = Suite(&RunS{}) + +type RunS struct{} + +func (s *RunS) TestCountSuite(c *C) { + suitesRun += 1 +} + +// ----------------------------------------------------------------------- +// Tests ensuring result counting works properly. + +func (s *RunS) TestSuccess(c *C) { + output := String{} + result := Run(&SuccessHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 1) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestFailure(c *C) { + output := String{} + result := Run(&FailHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 1) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestFixture(c *C) { + output := String{} + result := Run(&FixtureHelper{}, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 2) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnTest(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "Test1"} + result := Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 1) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 1) + c.Check(result.FixturePanicked, Equals, 0) + c.Check(result.Missed, Equals, 0) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnSetUpTest(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "SetUpTest"} + result := 
Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 1) + c.Check(result.Missed, Equals, 2) + c.Check(result.RunError, IsNil) +} + +func (s *RunS) TestPanicOnSetUpSuite(c *C) { + output := String{} + helper := &FixtureHelper{panicOn: "SetUpSuite"} + result := Run(helper, &RunConf{Output: &output}) + c.Check(result.Succeeded, Equals, 0) + c.Check(result.Failed, Equals, 0) + c.Check(result.Skipped, Equals, 0) + c.Check(result.Panicked, Equals, 0) + c.Check(result.FixturePanicked, Equals, 1) + c.Check(result.Missed, Equals, 2) + c.Check(result.RunError, IsNil) +} + +// ----------------------------------------------------------------------- +// Check result aggregation. + +func (s *RunS) TestAdd(c *C) { + result := &Result{ + Succeeded: 1, + Skipped: 2, + Failed: 3, + Panicked: 4, + FixturePanicked: 5, + Missed: 6, + ExpectedFailures: 7, + } + result.Add(&Result{ + Succeeded: 10, + Skipped: 20, + Failed: 30, + Panicked: 40, + FixturePanicked: 50, + Missed: 60, + ExpectedFailures: 70, + }) + c.Check(result.Succeeded, Equals, 11) + c.Check(result.Skipped, Equals, 22) + c.Check(result.Failed, Equals, 33) + c.Check(result.Panicked, Equals, 44) + c.Check(result.FixturePanicked, Equals, 55) + c.Check(result.Missed, Equals, 66) + c.Check(result.ExpectedFailures, Equals, 77) + c.Check(result.RunError, IsNil) +} + +// ----------------------------------------------------------------------- +// Check the Passed() method. 
+ +func (s *RunS) TestPassed(c *C) { + c.Assert((&Result{}).Passed(), Equals, true) + c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true) + c.Assert((&Result{Skipped: 1}).Passed(), Equals, true) + c.Assert((&Result{Failed: 1}).Passed(), Equals, false) + c.Assert((&Result{Panicked: 1}).Passed(), Equals, false) + c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false) + c.Assert((&Result{Missed: 1}).Passed(), Equals, false) + c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false) +} + +// ----------------------------------------------------------------------- +// Check that result printing is working correctly. + +func (s *RunS) TestPrintSuccess(c *C) { + result := &Result{Succeeded: 5} + c.Check(result.String(), Equals, "OK: 5 passed") +} + +func (s *RunS) TestPrintFailure(c *C) { + result := &Result{Failed: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED") +} + +func (s *RunS) TestPrintSkipped(c *C) { + result := &Result{Skipped: 5} + c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped") +} + +func (s *RunS) TestPrintExpectedFailures(c *C) { + result := &Result{ExpectedFailures: 5} + c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures") +} + +func (s *RunS) TestPrintPanicked(c *C) { + result := &Result{Panicked: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED") +} + +func (s *RunS) TestPrintFixturePanicked(c *C) { + result := &Result{FixturePanicked: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED") +} + +func (s *RunS) TestPrintMissed(c *C) { + result := &Result{Missed: 5} + c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED") +} + +func (s *RunS) TestPrintAll(c *C) { + result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3, + Panicked: 4, FixturePanicked: 5, Missed: 6} + c.Check(result.String(), Equals, + "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+ + "5 FIXTURE-PANICKED, 6 MISSED") +} + +func (s *RunS) 
TestPrintRunError(c *C) { + result := &Result{Succeeded: 1, Failed: 1, + RunError: errors.New("Kaboom!")} + c.Check(result.String(), Equals, "ERROR: Kaboom!") +} + +// ----------------------------------------------------------------------- +// Verify that the method pattern flag works correctly. + +func (s *RunS) TestFilterTestName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "Test[91]"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) +} + +func (s *RunS) TestFilterTestNameWithAll(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: ".*"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) TestFilterSuiteName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "FixtureHelper"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test1") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "SetUpTest") + c.Check(helper.calls[5], Equals, "Test2") + c.Check(helper.calls[6], Equals, "TearDownTest") + c.Check(helper.calls[7], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) 
TestFilterSuiteNameAndTestName(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"} + Run(&helper, &runConf) + c.Check(helper.calls[0], Equals, "SetUpSuite") + c.Check(helper.calls[1], Equals, "SetUpTest") + c.Check(helper.calls[2], Equals, "Test2") + c.Check(helper.calls[3], Equals, "TearDownTest") + c.Check(helper.calls[4], Equals, "TearDownSuite") + c.Check(len(helper.calls), Equals, 5) +} + +func (s *RunS) TestFilterAllOut(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "NotFound"} + Run(&helper, &runConf) + c.Check(len(helper.calls), Equals, 0) +} + +func (s *RunS) TestRequirePartialMatch(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "est"} + Run(&helper, &runConf) + c.Check(len(helper.calls), Equals, 8) +} + +func (s *RunS) TestFilterError(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Filter: "]["} + result := Run(&helper, &runConf) + c.Check(result.String(), Equals, + "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`") + c.Check(len(helper.calls), Equals, 0) +} + +// ----------------------------------------------------------------------- +// Verify that List works correctly. + +func (s *RunS) TestListFiltered(c *C) { + names := List(&FixtureHelper{}, &RunConf{Filter: "1"}) + c.Assert(names, DeepEquals, []string{ + "FixtureHelper.Test1", + }) +} + +func (s *RunS) TestList(c *C) { + names := List(&FixtureHelper{}, &RunConf{}) + c.Assert(names, DeepEquals, []string{ + "FixtureHelper.Test1", + "FixtureHelper.Test2", + }) +} + +// ----------------------------------------------------------------------- +// Verify that verbose mode prints tests which pass as well. 
+ +func (s *RunS) TestVerboseMode(c *C) { + helper := FixtureHelper{} + output := String{} + runConf := RunConf{Output: &output, Verbose: true} + Run(&helper, &runConf) + + expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" + + "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" + + c.Assert(output.value, Matches, expected) +} + +func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) { + helper := FixtureHelper{panicOn: "Test1"} + output := String{} + runConf := RunConf{Output: &output, Verbose: true} + Run(&helper, &runConf) + + expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line. + "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n" + + c.Assert(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Verify the stream output mode. In this mode there's no output caching. + +type StreamHelper struct { + l2 sync.Mutex + l3 sync.Mutex +} + +func (s *StreamHelper) SetUpSuite(c *C) { + c.Log("0") +} + +func (s *StreamHelper) Test1(c *C) { + c.Log("1") + s.l2.Lock() + s.l3.Lock() + go func() { + s.l2.Lock() // Wait for "2". + c.Log("3") + s.l3.Unlock() + }() +} + +func (s *StreamHelper) Test2(c *C) { + c.Log("2") + s.l2.Unlock() + s.l3.Lock() // Wait for "3". 
+ c.Fail() + c.Log("4") +} + +func (s *RunS) TestStreamMode(c *C) { + helper := &StreamHelper{} + output := String{} + runConf := RunConf{Output: &output, Stream: true} + Run(helper, &runConf) + + expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" + + "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" + + "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" + + "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" + + "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" + + "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n" + + c.Assert(output.value, Matches, expected) +} + +type StreamMissHelper struct{} + +func (s *StreamMissHelper) SetUpSuite(c *C) { + c.Log("0") + c.Fail() +} + +func (s *StreamMissHelper) Test1(c *C) { + c.Log("1") +} + +func (s *RunS) TestStreamModeWithMiss(c *C) { + helper := &StreamMissHelper{} + output := String{} + runConf := RunConf{Output: &output, Stream: true} + Run(helper, &runConf) + + expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" + + "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" + + "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" + + "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n" + + c.Assert(output.value, Matches, expected) +} + +// ----------------------------------------------------------------------- +// Verify that that the keep work dir request indeed does so. 
+ +type WorkDirSuite struct {} + +func (s *WorkDirSuite) Test(c *C) { + c.MkDir() +} + +func (s *RunS) TestKeepWorkDir(c *C) { + output := String{} + runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true} + result := Run(&WorkDirSuite{}, &runConf) + + c.Assert(result.String(), Matches, ".*\nWORK=" + result.WorkDir) + + stat, err := os.Stat(result.WorkDir) + c.Assert(err, IsNil) + c.Assert(stat.IsDir(), Equals, true) +} diff --git a/vendor/src/github.com/gorilla/mux/mux.go b/vendor/src/github.com/gorilla/mux/mux.go index 8b23c39d397d0..5b5f8e7db5dc1 100644 --- a/vendor/src/github.com/gorilla/mux/mux.go +++ b/vendor/src/github.com/gorilla/mux/mux.go @@ -87,10 +87,10 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { setCurrentRoute(req, match.Route) } if handler == nil { - if r.NotFoundHandler == nil { - r.NotFoundHandler = http.NotFoundHandler() - } handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } } if !r.KeepContext { defer context.Clear(req) diff --git a/vendor/src/github.com/gorilla/mux/mux_test.go b/vendor/src/github.com/gorilla/mux/mux_test.go index 0e2e48067ae30..e455bce8fdfee 100644 --- a/vendor/src/github.com/gorilla/mux/mux_test.go +++ b/vendor/src/github.com/gorilla/mux/mux_test.go @@ -462,6 +462,15 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: true, }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, { title: "Queries route, bad query", route: new(Route).Queries("foo", "bar", "baz", "ding"), @@ -471,6 +480,42 @@ func TestQueries(t *testing.T) { path: "", shouldMatch: false, }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + request: newRequest("GET", 
"http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, } for _, test := range tests { diff --git a/vendor/src/github.com/gorilla/mux/old_test.go b/vendor/src/github.com/gorilla/mux/old_test.go index 42530590e7788..1f7c190c0f972 100644 --- a/vendor/src/github.com/gorilla/mux/old_test.go +++ b/vendor/src/github.com/gorilla/mux/old_test.go @@ -329,35 +329,6 @@ var pathMatcherTests = []pathMatcherTest{ }, } -type queryMatcherTest struct { - matcher queryMatcher - url string - result bool -} - -var queryMatcherTests = []queryMatcherTest{ - { - matcher: queryMatcher(map[string]string{"foo": "bar", "baz": "ding"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: true, - }, - { - matcher: queryMatcher(map[string]string{"foo": "", "baz": ""}), - url: "http://localhost:8080/?foo=anything&baz=anything", - result: true, - }, - { - matcher: queryMatcher(map[string]string{"foo": "ding", "baz": "bar"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: false, - }, - { - matcher: queryMatcher(map[string]string{"bar": "foo", "ding": "baz"}), - url: "http://localhost:8080/?foo=bar&baz=ding", - result: false, - 
}, -} - type schemeMatcherTest struct { matcher schemeMatcher url string @@ -519,23 +490,8 @@ func TestPathMatcher(t *testing.T) { } } -func TestQueryMatcher(t *testing.T) { - for _, v := range queryMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - func TestSchemeMatcher(t *testing.T) { - for _, v := range queryMatcherTests { + for _, v := range schemeMatcherTests { request, _ := http.NewRequest("GET", v.url, nil) var routeMatch RouteMatch result := v.matcher.Match(request, &routeMatch) @@ -735,7 +691,7 @@ func TestNewRegexp(t *testing.T) { } for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false) + p, _ = newRouteRegexp(pattern, false, false, false, false) for path, result := range paths { matches = p.regexp.FindStringSubmatch(path) if result == nil { diff --git a/vendor/src/github.com/gorilla/mux/regexp.go b/vendor/src/github.com/gorilla/mux/regexp.go index 925f268abefa4..a6305483d5ae5 100644 --- a/vendor/src/github.com/gorilla/mux/regexp.go +++ b/vendor/src/github.com/gorilla/mux/regexp.go @@ -14,7 +14,7 @@ import ( ) // newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host or path. +// used to match a host, a path or a query string. // // It will extract named variables, assemble a regexp to be matched, create // a "reverse" template to build URLs and compile regexps to validate variable @@ -23,7 +23,7 @@ import ( // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. 
-func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*routeRegexp, error) { +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { @@ -33,11 +33,15 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout template := tpl // Now let's parse it. defaultPattern := "[^/]+" - if matchHost { + if matchQuery { + defaultPattern = "[^?&]+" + matchPrefix = true + } else if matchHost { defaultPattern = "[^.]+" - matchPrefix, strictSlash = false, false + matchPrefix = false } - if matchPrefix { + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { strictSlash = false } // Set a flag for strictSlash. @@ -48,7 +52,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout } varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("^") + pattern := bytes.NewBufferString("") + if !matchQuery { + pattern.WriteByte('^') + } reverse := bytes.NewBufferString("") var end int var err error @@ -100,6 +107,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout return &routeRegexp{ template: template, matchHost: matchHost, + matchQuery: matchQuery, strictSlash: strictSlash, regexp: reg, reverse: reverse.String(), @@ -113,8 +121,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, strictSlash bool) (*rout type routeRegexp struct { // The unmodified template. template string - // True for host match, false for path match. + // True for host match, false for path or query string match. matchHost bool + // True for query string match, false for path and host match. + matchQuery bool // The strictSlash value defined on the route, but disabled if PathPrefix was used. strictSlash bool // Expanded regexp. 
@@ -130,7 +140,11 @@ type routeRegexp struct { // Match matches the regexp against the URL host or path. func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { - return r.regexp.MatchString(req.URL.Path) + if r.matchQuery { + return r.regexp.MatchString(req.URL.RawQuery) + } else { + return r.regexp.MatchString(req.URL.Path) + } } return r.regexp.MatchString(getHost(req)) } @@ -196,8 +210,9 @@ func braceIndices(s string) ([]int, error) { // routeRegexpGroup groups the route matchers that carry variables. type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp + host *routeRegexp + path *routeRegexp + queries []*routeRegexp } // setMatch extracts the variables from the URL once a route matches. @@ -234,17 +249,28 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } } } + // Store query string variables. + rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } } // getHost tries its best to return the request host. func getHost(r *http.Request) string { - if !r.URL.IsAbs() { - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host + if r.URL.IsAbs() { + return r.URL.Host } - return r.URL.Host + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + } diff --git a/vendor/src/github.com/gorilla/mux/route.go b/vendor/src/github.com/gorilla/mux/route.go index 5cb2526d61390..c310e66bc7c3f 100644 --- a/vendor/src/github.com/gorilla/mux/route.go +++ b/vendor/src/github.com/gorilla/mux/route.go @@ -135,12 +135,12 @@ func (r *Route) addMatcher(m matcher) *Route { } // addRegexpMatcher adds a host or path matcher and builder to a route. 
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error { +func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { if r.err != nil { return r.err } r.regexp = r.getRegexpGroup() - if !matchHost { + if !matchHost && !matchQuery { if len(tpl) == 0 || tpl[0] != '/' { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) } @@ -148,10 +148,15 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, r.strictSlash) + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) if err != nil { return err } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } if matchHost { if r.regexp.path != nil { if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { @@ -165,7 +170,11 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix bool) error return err } } - r.regexp.path = rr + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } } r.addMatcher(rr) return nil @@ -219,7 +228,7 @@ func (r *Route) Headers(pairs ...string) *Route { // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false) + r.err = r.addRegexpMatcher(tpl, true, false, false) return r } @@ -278,7 +287,7 @@ func (r *Route) Methods(methods ...string) *Route { // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). 
func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false) + r.err = r.addRegexpMatcher(tpl, false, false, false) return r } @@ -294,35 +303,42 @@ func (r *Route) Path(tpl string) *Route { // Also note that the setting of Router.StrictSlash() has no effect on routes // with a PathPrefix matcher. func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true) + r.err = r.addRegexpMatcher(tpl, false, true, false) return r } // Query ---------------------------------------------------------------------- -// queryMatcher matches the request against URL queries. -type queryMatcher map[string]string - -func (m queryMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMap(m, r.URL.Query(), false) -} - // Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. For example: +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: // // r := mux.NewRouter() -// r.Queries("foo", "bar", "baz", "ding") +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&baz=ding. +// values, e.g.: ?foo=bar&id=42. // // It the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. func (r *Route) Queries(pairs ...string) *Route { - if r.err == nil { - var queries map[string]string - queries, r.err = mapFromPairs(pairs...) 
- return r.addMatcher(queryMatcher(queries)) + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + return r + } } + return r } @@ -498,8 +514,9 @@ func (r *Route) getRegexpGroup() *routeRegexpGroup { } else { // Copy. r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, + host: regexp.host, + path: regexp.path, + queries: regexp.queries, } } } diff --git a/volumes/repository.go b/volumes/repository.go index 08c5849818295..71d6c0ad60afa 100644 --- a/volumes/repository.go +++ b/volumes/repository.go @@ -58,7 +58,7 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) { path = filepath.Clean(path) // Ignore the error here since the path may not exist - // Really just want to make sure the path we are using is real(or non-existant) + // Really just want to make sure the path we are using is real(or nonexistent) if cleanPath, err := filepath.EvalSymlinks(path); err == nil { path = cleanPath } @@ -77,7 +77,8 @@ func (r *Repository) newVolume(path string, writable bool) (*Volume, error) { return nil, err } - return v, r.add(v) + r.add(v) + return v, nil } func (r *Repository) restore() error { @@ -103,9 +104,7 @@ func (r *Repository) restore() error { continue } } - if err := r.add(vol); err != nil { - logrus.Debugf("Error restoring volume: %v", err) - } + r.add(vol) } return nil } @@ -125,12 +124,11 @@ func (r *Repository) get(path string) *Volume { return r.volumes[filepath.Clean(path)] } -func (r *Repository) add(volume *Volume) error { +func (r *Repository) add(volume *Volume) { if vol := r.get(volume.Path); vol != nil { - return fmt.Errorf("Volume exists: %s", volume.ID) + return } r.volumes[volume.Path] = volume - return nil } func (r *Repository) Delete(path string) error { diff --git 
a/volumes/volume.go b/volumes/volume.go index e888f441ed79e..5b3b64601822b 100644 --- a/volumes/volume.go +++ b/volumes/volume.go @@ -2,14 +2,10 @@ package volumes import ( "encoding/json" - "io" - "io/ioutil" "os" - "path" "path/filepath" "sync" - "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/symlink" ) @@ -24,35 +20,6 @@ type Volume struct { lock sync.Mutex } -func (v *Volume) Export(resource, name string) (io.ReadCloser, error) { - if v.IsBindMount && filepath.Base(resource) == name { - name = "" - } - - basePath, err := v.getResourcePath(resource) - if err != nil { - return nil, err - } - stat, err := os.Stat(basePath) - if err != nil { - return nil, err - } - var filter []string - if !stat.IsDir() { - d, f := path.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{path.Base(basePath)} - basePath = path.Dir(basePath) - } - return archive.TarWithOptions(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - Name: name, - IncludeFiles: filter, - }) -} - func (v *Volume) IsDir() (bool, error) { stat, err := os.Stat(v.Path) if err != nil { @@ -113,17 +80,19 @@ func (v *Volume) ToDisk() error { } func (v *Volume) toDisk() error { - data, err := json.Marshal(v) + jsonPath, err := v.jsonPath() if err != nil { return err } - - pth, err := v.jsonPath() + f, err := os.OpenFile(jsonPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return err } - - return ioutil.WriteFile(pth, data, 0666) + if err := json.NewEncoder(f).Encode(v); err != nil { + f.Close() + return err + } + return f.Close() } func (v *Volume) FromDisk() error { @@ -146,14 +115,38 @@ func (v *Volume) FromDisk() error { } func (v *Volume) jsonPath() (string, error) { - return v.getRootResourcePath("config.json") + return v.GetRootResourcePath("config.json") } -func (v *Volume) getRootResourcePath(path string) (string, error) { + +// Evalutes `path` in the scope of the volume's root path, with proper path +// sanitisation. 
Symlinks are all scoped to the root of the volume, as +// though the volume's root was `/`. +// +// The volume's root path is the host-facing path of the root of the volume's +// mountpoint inside a container. +// +// NOTE: The returned path is *only* safely scoped inside the volume's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (v *Volume) GetResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) - return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath) + return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path) } -func (v *Volume) getResourcePath(path string) (string, error) { +// Evalutes `path` in the scope of the volume's config path, with proper path +// sanitisation. Symlinks are all scoped to the root of the config path, as +// though the config path was `/`. +// +// The config path of a volume is not exposed to the container and is just used +// to store volume configuration options and other internal information. If in +// doubt, you probably want to just use v.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the volume's config +// path if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. 
+func (v *Volume) GetRootResourcePath(path string) (string, error) { cleanPath := filepath.Join("/", path) - return symlink.FollowSymlinkInScope(filepath.Join(v.Path, cleanPath), v.Path) + return symlink.FollowSymlinkInScope(filepath.Join(v.configPath, cleanPath), v.configPath) } diff --git a/volumes/volume_test.go b/volumes/volume_test.go index caf38c8bb4289..b30549d379551 100644 --- a/volumes/volume_test.go +++ b/volumes/volume_test.go @@ -1,7 +1,7 @@ package volumes import ( - "strings" + "os" "testing" "github.com/docker/docker/pkg/stringutils" @@ -33,8 +33,8 @@ func TestInitializeCannotMkdirOnNonExistentPath(t *testing.T) { t.Fatal("Expected not to initialize volume with a non existent path") } - if !strings.Contains(err.Error(), "mkdir : no such file or directory") { - t.Fatalf("Expected to get mkdir no such file or directory, got %s", err) + if !os.IsNotExist(err) { + t.Fatalf("Expected to get ErrNotExist error, got %s", err) } } @@ -49,7 +49,7 @@ func TestInitializeCannotStatPathFileNameTooLong(t *testing.T) { t.Fatal("Expected not to initialize volume with a non existent path") } - if !strings.Contains(err.Error(), "file name too long") { - t.Fatalf("Expected to get ENAMETOOLONG error, got %s", err) + if os.IsNotExist(err) { + t.Fatal("Expected to not get ErrNotExist") } }